Dataset columns:

| column | type | range |
|---|---|---|
| code | string | lengths 86 to 54.5k |
| code_codestyle | int64 | 0 to 371 |
| style_context | string | lengths 87 to 49.2k |
| style_context_codestyle | int64 | 0 to 349 |
| label | int64 | 0 to 1 |
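A minimal sketch of how a row with this schema might be loaded and inspected, assuming the standard `datasets` API; the dataset id below is a placeholder, since the dump does not name the dataset. In the rows excerpted below, `label` is 1 exactly when `code_codestyle` equals `style_context_codestyle`.

```python
from datasets import load_dataset

# Placeholder id: the dump does not say where this dataset is hosted.
ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
# Observed pattern in the excerpted rows: label is 1 exactly when the code
# sample and its style context carry the same codestyle id.
same_style = row["code_codestyle"] == row["style_context_codestyle"]
print(row["label"], int(same_style))
```

# Code sample: appears to be the Swin Transformer configuration (SwinConfig plus its ONNX config) from transformers; identifiers, type hints, and numeric literals are mangled by the dataset's style transform.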
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase : Dict = logging.get_logger(__name__)
lowercase : Tuple = {
'microsoft/swin-tiny-patch4-window7-224': (
'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class lowerCamelCase__ ( __lowercase , __lowercase):
'''simple docstring'''
_A = 'swin'
_A = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self :List[Any] , a :Union[str, Any]=2_2_4 , a :Any=4 , a :str=3 , a :str=9_6 , a :Optional[int]=[2, 2, 6, 2] , a :Optional[int]=[3, 6, 1_2, 2_4] , a :Optional[int]=7 , a :int=4.0 , a :Optional[Any]=True , a :int=0.0 , a :Dict=0.0 , a :List[Any]=0.1 , a :Union[str, Any]="gelu" , a :Any=False , a :List[str]=0.02 , a :int=1E-5 , a :Optional[Any]=3_2 , a :int=None , a :List[Any]=None , **a :Any , ) -> Union[str, Any]:
super().__init__(**a )
__UpperCamelCase : List[str] = image_size
__UpperCamelCase : Optional[Any] = patch_size
__UpperCamelCase : List[str] = num_channels
__UpperCamelCase : Dict = embed_dim
__UpperCamelCase : Optional[int] = depths
__UpperCamelCase : Union[str, Any] = len(a )
__UpperCamelCase : List[str] = num_heads
__UpperCamelCase : List[Any] = window_size
__UpperCamelCase : str = mlp_ratio
__UpperCamelCase : Any = qkv_bias
__UpperCamelCase : Union[str, Any] = hidden_dropout_prob
__UpperCamelCase : str = attention_probs_dropout_prob
__UpperCamelCase : Dict = drop_path_rate
__UpperCamelCase : Optional[Any] = hidden_act
__UpperCamelCase : str = use_absolute_embeddings
__UpperCamelCase : List[str] = layer_norm_eps
__UpperCamelCase : List[Any] = initializer_range
__UpperCamelCase : Union[str, Any] = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__UpperCamelCase : Union[str, Any] = int(embed_dim * 2 ** (len(a ) - 1) )
__UpperCamelCase : Any = ["stem"] + [f'stage{idx}' for idx in range(1 , len(a ) + 1 )]
__UpperCamelCase , __UpperCamelCase : Optional[int] = get_aligned_output_features_output_indices(
out_features=a , out_indices=a , stage_names=self.stage_names )
class lowerCamelCase__ ( __lowercase):
'''simple docstring'''
_A = version.parse('1.11')
@property
def _lowerCamelCase ( self :List[Any] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _lowerCamelCase ( self :Dict ) -> float:
return 1E-4

| code_codestyle: 232 |
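
# Style context: appears to be the fast tests for diffusers' KandinskyV22PriorPipeline, with the same identifier mangling.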
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__ ( __lowercase , unittest.TestCase):
'''simple docstring'''
_A = KandinskyVaaPriorPipeline
_A = ['prompt']
_A = ['prompt', 'negative_prompt']
_A = [
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
_A = False
@property
def _lowerCamelCase ( self :Tuple ) -> Optional[Any]:
return 3_2
@property
def _lowerCamelCase ( self :List[str] ) -> List[str]:
return 3_2
@property
def _lowerCamelCase ( self :Union[str, Any] ) -> Tuple:
return self.time_input_dim
@property
def _lowerCamelCase ( self :Union[str, Any] ) -> Any:
return self.time_input_dim * 4
@property
def _lowerCamelCase ( self :Tuple ) -> str:
return 1_0_0
@property
def _lowerCamelCase ( self :Union[str, Any] ) -> Any:
__UpperCamelCase : int = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def _lowerCamelCase ( self :str ) -> int:
torch.manual_seed(0 )
__UpperCamelCase : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(a )
@property
def _lowerCamelCase ( self :int ) -> Optional[Any]:
torch.manual_seed(0 )
__UpperCamelCase : Optional[int] = {
"num_attention_heads": 2,
"attention_head_dim": 1_2,
"embedding_dim": self.text_embedder_hidden_size,
"num_layers": 1,
}
__UpperCamelCase : List[Any] = PriorTransformer(**a )
# clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0 - set clip_std to 1 so it doesn't
__UpperCamelCase : Optional[int] = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def _lowerCamelCase ( self :Dict ) -> Union[str, Any]:
torch.manual_seed(0 )
__UpperCamelCase : Union[str, Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=2_2_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1_4 , )
__UpperCamelCase : int = CLIPVisionModelWithProjection(a )
return model
@property
def _lowerCamelCase ( self :Dict ) -> Optional[Any]:
__UpperCamelCase : List[str] = CLIPImageProcessor(
crop_size=2_2_4 , do_center_crop=a , do_normalize=a , do_resize=a , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_2_4 , )
return image_processor
def _lowerCamelCase ( self :str ) -> Optional[int]:
__UpperCamelCase : str = self.dummy_prior
__UpperCamelCase : int = self.dummy_image_encoder
__UpperCamelCase : Tuple = self.dummy_text_encoder
__UpperCamelCase : int = self.dummy_tokenizer
__UpperCamelCase : Optional[Any] = self.dummy_image_processor
__UpperCamelCase : int = UnCLIPScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_0_0_0 , clip_sample=a , clip_sample_range=10.0 , )
__UpperCamelCase : List[Any] = {
"prior": prior,
"image_encoder": image_encoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"scheduler": scheduler,
"image_processor": image_processor,
}
return components
def _lowerCamelCase ( self :Optional[Any] , a :int , a :Union[str, Any]=0 ) -> Any:
if str(a ).startswith("mps" ):
__UpperCamelCase : int = torch.manual_seed(a )
else:
__UpperCamelCase : List[Any] = torch.Generator(device=a ).manual_seed(a )
__UpperCamelCase : int = {
"prompt": "horse",
"generator": generator,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def _lowerCamelCase ( self :List[Any] ) -> Dict:
__UpperCamelCase : int = "cpu"
__UpperCamelCase : List[str] = self.get_dummy_components()
__UpperCamelCase : List[str] = self.pipeline_class(**a )
__UpperCamelCase : Tuple = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
__UpperCamelCase : int = pipe(**self.get_dummy_inputs(a ) )
__UpperCamelCase : int = output.image_embeds
__UpperCamelCase : Optional[int] = pipe(
**self.get_dummy_inputs(a ) , return_dict=a , )[0]
__UpperCamelCase : Union[str, Any] = image[0, -1_0:]
__UpperCamelCase : List[str] = image_from_tuple[0, -1_0:]
assert image.shape == (1, 3_2)
__UpperCamelCase : List[Any] = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def _lowerCamelCase ( self :int ) -> Union[str, Any]:
__UpperCamelCase : str = torch_device == "cpu"
__UpperCamelCase : List[str] = True
__UpperCamelCase : List[Any] = False
self._test_inference_batch_single_identical(
test_max_difference=a , relax_max_difference=a , test_mean_pixel_difference=a , )
@skip_mps
def _lowerCamelCase ( self :Any ) -> int:
__UpperCamelCase : Optional[Any] = torch_device == "cpu"
__UpperCamelCase : Dict = False
self._test_attention_slicing_forward_pass(
test_max_difference=a , test_mean_pixel_difference=a , )

| style_context_codestyle: 232 | label: 1 |
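
# Code sample: appears to be the ALBERT configuration (AlbertConfig plus its ONNX config) from transformers.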
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
lowercase_ = {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = 'albert'
def __init__( self , _a=30_000 , _a=128 , _a=4_096 , _a=12 , _a=1 , _a=64 , _a=16_384 , _a=1 , _a="gelu_new" , _a=0 , _a=0 , _a=512 , _a=2 , _a=0.02 , _a=1E-12 , _a=0.1 , _a="absolute" , _a=0 , _a=2 , _a=3 , **_a , ):
super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a )
__a = vocab_size
__a = embedding_size
__a = hidden_size
__a = num_hidden_layers
__a = num_hidden_groups
__a = num_attention_heads
__a = inner_group_num
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = initializer_range
__a = layer_norm_eps
__a = classifier_dropout_prob
__a = position_embedding_type
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def __UpperCAmelCase ( self ):
if self.task == "multiple-choice":
__a = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__a = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| code_codestyle: 11 |
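
# Style context: appears to be transformers' ZeroShotAudioClassificationPipeline.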
"""simple docstring"""
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
lowercase_ = logging.get_logger(__name__)
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , **_a ):
super().__init__(**_a )
if self.framework != "pt":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
# No specific FOR_XXX available yet
def __call__( self , _a , **_a ):
return super().__call__(_a , **_a )
def __UpperCAmelCase ( self , **_a ):
__a = {}
if "candidate_labels" in kwargs:
__a = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
__a = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def __UpperCAmelCase ( self , _a , _a=None , _a="This is a sound of {}." ):
if isinstance(_a , _a ):
if audio.startswith('''http://''' ) or audio.startswith('''https://''' ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
__a = requests.get(_a ).content
else:
with open(_a , '''rb''' ) as f:
__a = f.read()
if isinstance(_a , _a ):
__a = ffmpeg_read(_a , self.feature_extractor.sampling_rate )
if not isinstance(_a , np.ndarray ):
raise ValueError('''We expect a numpy ndarray as input''' )
if len(audio.shape ) != 1:
raise ValueError('''We expect a single channel audio input for ZeroShotAudioClassificationPipeline''' )
__a = self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors='''pt''' )
__a = candidate_labels
__a = [hypothesis_template.format(_a ) for x in candidate_labels]
__a = self.tokenizer(_a , return_tensors=self.framework , padding=_a )
__a = [text_inputs]
return inputs
def __UpperCAmelCase ( self , _a ):
__a = model_inputs.pop('''candidate_labels''' )
__a = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] , _a ):
__a = text_inputs[0]
else:
# Batching case.
__a = text_inputs[0][0]
__a = self.model(**_a , **_a )
__a = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_audio,
}
return model_outputs
def __UpperCAmelCase ( self , _a ):
__a = model_outputs.pop('''candidate_labels''' )
__a = model_outputs['''logits'''][0]
if self.framework == "pt":
__a = logits.softmax(dim=0 )
__a = probs.tolist()
else:
raise ValueError('''`tf` framework not supported.''' )
__a = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(_a , _a ) , key=lambda _a : -x[0] )
]
return result
| style_context_codestyle: 11 | label: 1 |
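
# Code sample: appears to be accelerate's memory utilities (release_memory, should_reduce_batch_size, find_executable_batch_size).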
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def lowercase__ ( *_UpperCAmelCase ) -> Dict:
'''simple docstring'''
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowercase : str = list(UpperCamelCase_ )
for i in range(len(UpperCamelCase_ ) ):
lowercase : List[Any] = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def lowercase__ ( _UpperCAmelCase ) -> bool:
'''simple docstring'''
lowercase : str = [
'CUDA out of memory.', # CUDA OOM
'cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.', # CUDNN SNAFU
'DefaultCPUAllocator: can\'t allocate memory', # CPU OOM
]
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def lowercase__ ( _UpperCAmelCase = None , _UpperCAmelCase = 1_28 ) -> int:
'''simple docstring'''
if function is None:
return functools.partial(UpperCamelCase_ , starting_batch_size=UpperCamelCase_ )
lowercase : Any = starting_batch_size
def decorator(*_UpperCAmelCase , **_UpperCAmelCase ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
lowercase : List[str] = list(inspect.signature(UpperCamelCase_ ).parameters.keys() )
# Guard against user error
if len(UpperCamelCase_ ) < (len(UpperCamelCase_ ) + 1):
lowercase : List[Any] = ', '.join([f'''{arg}={value}''' for arg, value in zip(params[1:] , args[1:] )] )
raise TypeError(
f'''Batch size was passed into `{function.__name__}` as the first argument when called.'''
f'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' )
while True:
if batch_size == 0:
raise RuntimeError('No executable batch size found, reached zero.' )
try:
return function(UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ )
except Exception as e:
if should_reduce_batch_size(UpperCamelCase_ ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
| code_codestyle: 255 |
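
# Style context: appears to be transformers' HfArgumentParser (dataclass-driven argument parsing).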
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
__snake_case = NewType("""DataClass""", Any)
__snake_case = NewType("""DataClassType""", Any)
def _lowercase ( UpperCamelCase_ ) -> int:
'''simple docstring'''
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
F'Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).' )
def _lowercase ( UpperCamelCase_ ) -> Callable[[str], Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = {str(UpperCamelCase_ ): choice for choice in choices}
return lambda UpperCamelCase_ : str_to_choice.get(UpperCamelCase_ , UpperCamelCase_ )
def _lowercase ( *,
UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = dataclasses.MISSING , UpperCamelCase_ = dataclasses.MISSING , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> dataclasses.Field:
'''simple docstring'''
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
SCREAMING_SNAKE_CASE__ = {}
if aliases is not None:
SCREAMING_SNAKE_CASE__ = aliases
if help is not None:
SCREAMING_SNAKE_CASE__ = help
return dataclasses.field(metadata=UpperCamelCase_ , default=UpperCamelCase_ , default_factory=UpperCamelCase_ , **UpperCamelCase_ )
class lowercase__ ( _UpperCAmelCase ):
A__ : Iterable[DataClassType]
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Union[DataClassType, Iterable[DataClassType]] , **UpperCAmelCase_ : Optional[Any] ):
# To make the default appear when using --help
if "formatter_class" not in kwargs:
SCREAMING_SNAKE_CASE__ = ArgumentDefaultsHelpFormatter
super().__init__(**UpperCAmelCase_ )
if dataclasses.is_dataclass(UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = [dataclass_types]
SCREAMING_SNAKE_CASE__ = list(UpperCAmelCase_ )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(UpperCAmelCase_ )
@staticmethod
def A_ ( UpperCAmelCase_ : ArgumentParser , UpperCAmelCase_ : dataclasses.Field ):
SCREAMING_SNAKE_CASE__ = F'--{field.name}'
SCREAMING_SNAKE_CASE__ = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , UpperCAmelCase_ ):
raise RuntimeError(
'Unresolved type detected, which should have been done with the help of '
'`typing.get_type_hints` method by default' )
SCREAMING_SNAKE_CASE__ = kwargs.pop('aliases' , [] )
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = [aliases]
SCREAMING_SNAKE_CASE__ = getattr(field.type , '__origin__' , field.type )
if origin_type is Union or (hasattr(UpperCAmelCase_ , 'UnionType' ) and isinstance(UpperCAmelCase_ , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(UpperCAmelCase_ ) not in field.type.__args__
):
raise ValueError(
'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'
' the argument parser only supports one type per argument.'
F' Problem encountered in field \'{field.name}\'.' )
if type(UpperCAmelCase_ ) not in field.type.__args__:
# filter `str` in Union
SCREAMING_SNAKE_CASE__ = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
SCREAMING_SNAKE_CASE__ = getattr(field.type , '__origin__' , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
SCREAMING_SNAKE_CASE__ = (
field.type.__args__[0] if isinstance(UpperCAmelCase_ , field.type.__args__[1] ) else field.type.__args__[1]
)
SCREAMING_SNAKE_CASE__ = getattr(field.type , '__origin__' , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
SCREAMING_SNAKE_CASE__ = {}
if origin_type is Literal or (isinstance(field.type , UpperCAmelCase_ ) and issubclass(field.type , UpperCAmelCase_ )):
if origin_type is Literal:
SCREAMING_SNAKE_CASE__ = field.type.__args__
else:
SCREAMING_SNAKE_CASE__ = [x.value for x in field.type]
SCREAMING_SNAKE_CASE__ = make_choice_type_function(kwargs['choices'] )
if field.default is not dataclasses.MISSING:
SCREAMING_SNAKE_CASE__ = field.default
else:
SCREAMING_SNAKE_CASE__ = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the current kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
SCREAMING_SNAKE_CASE__ = copy(UpperCAmelCase_ )
# Hack because type=bool in argparse does not behave as we want.
SCREAMING_SNAKE_CASE__ = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
SCREAMING_SNAKE_CASE__ = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
SCREAMING_SNAKE_CASE__ = default
# This tells argparse we accept 0 or 1 value after --field_name
SCREAMING_SNAKE_CASE__ = '?'
# This is the value that will get picked if we do --field_name (without value)
SCREAMING_SNAKE_CASE__ = True
elif isclass(UpperCAmelCase_ ) and issubclass(UpperCAmelCase_ , UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = field.type.__args__[0]
SCREAMING_SNAKE_CASE__ = '+'
if field.default_factory is not dataclasses.MISSING:
SCREAMING_SNAKE_CASE__ = field.default_factory()
elif field.default is dataclasses.MISSING:
SCREAMING_SNAKE_CASE__ = True
else:
SCREAMING_SNAKE_CASE__ = field.type
if field.default is not dataclasses.MISSING:
SCREAMING_SNAKE_CASE__ = field.default
elif field.default_factory is not dataclasses.MISSING:
SCREAMING_SNAKE_CASE__ = field.default_factory()
else:
SCREAMING_SNAKE_CASE__ = True
parser.add_argument(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_ )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
SCREAMING_SNAKE_CASE__ = False
parser.add_argument(F'--no_{field.name}' , action='store_false' , dest=field.name , **UpperCAmelCase_ )
def A_ ( self : List[Any] , UpperCAmelCase_ : DataClassType ):
if hasattr(UpperCAmelCase_ , '_argument_group_name' ):
SCREAMING_SNAKE_CASE__ = self.add_argument_group(dtype._argument_group_name )
else:
SCREAMING_SNAKE_CASE__ = self
try:
SCREAMING_SNAKE_CASE__ = get_type_hints(UpperCAmelCase_ )
except NameError:
raise RuntimeError(
F'Type resolution failed for {dtype}. Try declaring the class in global scope or '
'removing line of `from __future__ import annotations` which opts in Postponed '
'Evaluation of Annotations (PEP 563)' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = '.'.join(map(UpperCAmelCase_ , sys.version_info[:3] ) )
raise RuntimeError(
F'Type resolution failed for {dtype} on Python {python_version}. Try removing '
'line of `from __future__ import annotations` which opts in union types as '
'`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '
'support Python versions that lower than 3.10, you need to use '
'`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '
'`X | None`.' ) from ex
raise
for field in dataclasses.fields(UpperCAmelCase_ ):
if not field.init:
continue
SCREAMING_SNAKE_CASE__ = type_hints[field.name]
self._parse_dataclass_field(UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : Dict , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : str=None , UpperCAmelCase_ : str=None , ):
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
SCREAMING_SNAKE_CASE__ = []
if args_filename:
args_files.append(Path(UpperCAmelCase_ ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('.args' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
SCREAMING_SNAKE_CASE__ = ArgumentParser()
args_file_parser.add_argument(UpperCAmelCase_ , type=UpperCAmelCase_ , action='append' )
# Use only remaining args for further parsing (remove the args_file_flag)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = args_file_parser.parse_known_args(args=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = vars(UpperCAmelCase_ ).get(args_file_flag.lstrip('-' ) , UpperCAmelCase_ )
if cmd_args_file_paths:
args_files.extend([Path(UpperCAmelCase_ ) for p in cmd_args_file_paths] )
SCREAMING_SNAKE_CASE__ = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
SCREAMING_SNAKE_CASE__ = file_args + args if args is not None else file_args + sys.argv[1:]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.parse_known_args(args=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = []
for dtype in self.dataclass_types:
SCREAMING_SNAKE_CASE__ = {f.name for f in dataclasses.fields(UpperCAmelCase_ ) if f.init}
SCREAMING_SNAKE_CASE__ = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k in keys}
for k in keys:
delattr(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = dtype(**UpperCAmelCase_ )
outputs.append(UpperCAmelCase_ )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(UpperCAmelCase_ )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F'Some specified arguments are not used by the HfArgumentParser: {remaining_args}' )
return (*outputs,)
def A_ ( self : str , UpperCAmelCase_ : Dict[str, Any] , UpperCAmelCase_ : bool = False ):
SCREAMING_SNAKE_CASE__ = set(args.keys() )
SCREAMING_SNAKE_CASE__ = []
for dtype in self.dataclass_types:
SCREAMING_SNAKE_CASE__ = {f.name for f in dataclasses.fields(UpperCAmelCase_ ) if f.init}
SCREAMING_SNAKE_CASE__ = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
SCREAMING_SNAKE_CASE__ = dtype(**UpperCAmelCase_ )
outputs.append(UpperCAmelCase_ )
if not allow_extra_keys and unused_keys:
raise ValueError(F'Some keys are not used by the HfArgumentParser: {sorted(UpperCAmelCase_ )}' )
return tuple(UpperCAmelCase_ )
def A_ ( self : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : bool = False ):
with open(Path(UpperCAmelCase_ ) , encoding='utf-8' ) as open_json_file:
SCREAMING_SNAKE_CASE__ = json.loads(open_json_file.read() )
SCREAMING_SNAKE_CASE__ = self.parse_dict(UpperCAmelCase_ , allow_extra_keys=UpperCAmelCase_ )
return tuple(UpperCAmelCase_ )
def A_ ( self : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : bool = False ):
SCREAMING_SNAKE_CASE__ = self.parse_dict(yaml.safe_load(Path(UpperCAmelCase_ ).read_text() ) , allow_extra_keys=UpperCAmelCase_ )
return tuple(UpperCAmelCase_ )
| style_context_codestyle: 176 | label: 0 |
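
# Code sample: appears to be the DETA configuration (DetaConfig) from transformers.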
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"""ut/deta""": """https://huggingface.co/ut/deta/resolve/main/config.json""",
}
class A ( lowerCAmelCase__ ):
__UpperCAmelCase : int = "deta"
__UpperCAmelCase : Union[str, Any] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__(self : Any , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : Tuple=9_0_0 , __UpperCAmelCase : int=2_0_4_8 , __UpperCAmelCase : Dict=6 , __UpperCAmelCase : Any=2_0_4_8 , __UpperCAmelCase : List[str]=8 , __UpperCAmelCase : Any=6 , __UpperCAmelCase : Tuple=1_0_2_4 , __UpperCAmelCase : Optional[Any]=8 , __UpperCAmelCase : str=0.0 , __UpperCAmelCase : Any=True , __UpperCAmelCase : Union[str, Any]="relu" , __UpperCAmelCase : Any=2_5_6 , __UpperCAmelCase : Dict=0.1 , __UpperCAmelCase : str=0.0 , __UpperCAmelCase : Dict=0.0 , __UpperCAmelCase : Union[str, Any]=0.02 , __UpperCAmelCase : Union[str, Any]=1.0 , __UpperCAmelCase : int=True , __UpperCAmelCase : int=False , __UpperCAmelCase : Dict="sine" , __UpperCAmelCase : Dict=5 , __UpperCAmelCase : List[str]=4 , __UpperCAmelCase : Tuple=4 , __UpperCAmelCase : str=True , __UpperCAmelCase : Union[str, Any]=3_0_0 , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : Tuple=True , __UpperCAmelCase : Any=1 , __UpperCAmelCase : Optional[int]=5 , __UpperCAmelCase : List[str]=2 , __UpperCAmelCase : List[str]=1 , __UpperCAmelCase : Any=1 , __UpperCAmelCase : Optional[Any]=5 , __UpperCAmelCase : List[str]=2 , __UpperCAmelCase : Any=0.1 , __UpperCAmelCase : Tuple=0.25 , **__UpperCAmelCase : List[Any] , ) -> str:
"""simple docstring"""
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
UpperCAmelCase__ = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] )
else:
if isinstance(a__ , a__ ):
UpperCAmelCase__ = backbone_config.pop("model_type" )
UpperCAmelCase__ = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase__ = config_class.from_dict(a__ )
UpperCAmelCase__ = backbone_config
UpperCAmelCase__ = num_queries
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = d_model
UpperCAmelCase__ = encoder_ffn_dim
UpperCAmelCase__ = encoder_layers
UpperCAmelCase__ = encoder_attention_heads
UpperCAmelCase__ = decoder_ffn_dim
UpperCAmelCase__ = decoder_layers
UpperCAmelCase__ = decoder_attention_heads
UpperCAmelCase__ = dropout
UpperCAmelCase__ = attention_dropout
UpperCAmelCase__ = activation_dropout
UpperCAmelCase__ = activation_function
UpperCAmelCase__ = init_std
UpperCAmelCase__ = init_xavier_std
UpperCAmelCase__ = encoder_layerdrop
UpperCAmelCase__ = auxiliary_loss
UpperCAmelCase__ = position_embedding_type
# deformable attributes
UpperCAmelCase__ = num_feature_levels
UpperCAmelCase__ = encoder_n_points
UpperCAmelCase__ = decoder_n_points
UpperCAmelCase__ = two_stage
UpperCAmelCase__ = two_stage_num_proposals
UpperCAmelCase__ = with_box_refine
UpperCAmelCase__ = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
UpperCAmelCase__ = class_cost
UpperCAmelCase__ = bbox_cost
UpperCAmelCase__ = giou_cost
# Loss coefficients
UpperCAmelCase__ = mask_loss_coefficient
UpperCAmelCase__ = dice_loss_coefficient
UpperCAmelCase__ = bbox_loss_coefficient
UpperCAmelCase__ = giou_loss_coefficient
UpperCAmelCase__ = eos_coefficient
UpperCAmelCase__ = focal_alpha
super().__init__(is_encoder_decoder=a__ , **a__ )
@property
def lowercase_ (self : Tuple ) -> List[str]:
"""simple docstring"""
return self.encoder_attention_heads
@property
def lowercase_ (self : str ) -> List[str]:
"""simple docstring"""
return self.d_model
def lowercase_ (self : Any ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = copy.deepcopy(self.__dict__ )
UpperCAmelCase__ = self.backbone_config.to_dict()
UpperCAmelCase__ = self.__class__.model_type
return output
| code_codestyle: 364 |

from collections import Counter
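# Style context: appears to be the README/YAML metadata utilities (DatasetMetadata) from the datasets library.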
from pathlib import Path
from typing import Optional, Tuple
import yaml
class A ( yaml.SafeLoader ):
def lowercase_ (self : Tuple , __UpperCAmelCase : str ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = [self.constructed_objects[key_node] for key_node, _ in node.value]
UpperCAmelCase__ = [tuple(__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else key for key in keys]
UpperCAmelCase__ = Counter(__UpperCAmelCase )
UpperCAmelCase__ = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(f"""Got duplicate yaml keys: {duplicate_keys}""" )
def lowercase_ (self : Dict , __UpperCAmelCase : Dict , __UpperCAmelCase : Any=False ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = super().construct_mapping(__UpperCAmelCase , deep=__UpperCAmelCase )
self._check_no_duplicates_on_constructed_node(__UpperCAmelCase )
return mapping
def lowerCAmelCase_ ( __A ) -> Tuple[Optional[str], str]:
'''simple docstring'''
UpperCAmelCase__ = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
UpperCAmelCase__ = full_content[1:].index("---" ) + 1
UpperCAmelCase__ = "\n".join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(__A )
class A ( UpperCAmelCase_ ):
# class attributes
__UpperCAmelCase : Optional[Any] = {'train_eval_index'} # train-eval-index in the YAML metadata
@classmethod
def lowercase_ (cls : List[str] , __UpperCAmelCase : Path ) -> "DatasetMetadata":
"""simple docstring"""
with open(__UpperCAmelCase , encoding="utf-8" ) as readme_file:
UpperCAmelCase__ , UpperCAmelCase__ = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(__UpperCAmelCase )
else:
return cls()
def lowercase_ (self : int , __UpperCAmelCase : Path ) -> str:
"""simple docstring"""
if path.exists():
with open(__UpperCAmelCase , encoding="utf-8" ) as readme_file:
UpperCAmelCase__ = readme_file.read()
else:
UpperCAmelCase__ = None
UpperCAmelCase__ = self._to_readme(__UpperCAmelCase )
with open(__UpperCAmelCase , "w" , encoding="utf-8" ) as readme_file:
readme_file.write(__UpperCAmelCase )
def lowercase_ (self : str , __UpperCAmelCase : Optional[str] = None ) -> str:
"""simple docstring"""
if readme_content is not None:
UpperCAmelCase__ , UpperCAmelCase__ = _split_yaml_from_readme(__UpperCAmelCase )
UpperCAmelCase__ = "---\n" + self.to_yaml_string() + "---\n" + content
else:
UpperCAmelCase__ = "---\n" + self.to_yaml_string() + "---\n"
return full_content
@classmethod
def lowercase_ (cls : Optional[int] , __UpperCAmelCase : str ) -> "DatasetMetadata":
"""simple docstring"""
UpperCAmelCase__ = yaml.load(__UpperCAmelCase , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
UpperCAmelCase__ = {
(key.replace("-" , "_" ) if key.replace("-" , "_" ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**__UpperCAmelCase )
def lowercase_ (self : List[str] ) -> str:
"""simple docstring"""
return yaml.safe_dump(
{
(key.replace("_" , "-" ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=__UpperCAmelCase , allow_unicode=__UpperCAmelCase , encoding="utf-8" , ).decode("utf-8" )
UpperCamelCase__ = {
'image-classification': [],
'translation': [],
'image-segmentation': [],
'fill-mask': [],
'automatic-speech-recognition': [],
'token-classification': [],
'sentence-similarity': [],
'audio-classification': [],
'question-answering': [],
'summarization': [],
'zero-shot-classification': [],
'table-to-text': [],
'feature-extraction': [],
'other': [],
'multiple-choice': [],
'text-classification': [],
'text-to-image': [],
'text2text-generation': [],
'zero-shot-image-classification': [],
'tabular-classification': [],
'tabular-regression': [],
'image-to-image': [],
'tabular-to-text': [],
'unconditional-image-generation': [],
'text-retrieval': [],
'text-to-speech': [],
'object-detection': [],
'audio-to-audio': [],
'text-generation': [],
'conversational': [],
'table-question-answering': [],
'visual-question-answering': [],
'image-to-text': [],
'reinforcement-learning': [],
'voice-activity-detection': [],
'time-series-forecasting': [],
'document-question-answering': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
UpperCamelCase__ = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.')
ap.add_argument('readme_filepath')
UpperCamelCase__ = ap.parse_args()
UpperCamelCase__ = Path(args.readme_filepath)
UpperCamelCase__ = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| style_context_codestyle: 143 | label: 0 |
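
# Code sample: a small script that opens the top Google search results in a browser; "bsa" is the dataset's mangling of the bs4 (BeautifulSoup) import.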
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
lowerCamelCase = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
lowerCamelCase = requests.get(url, headers={'UserAgent': UserAgent().random})
# res.raise_for_status()
with open('project1a.html', 'wb') as out_file: # only for knowing the class
for data in res.iter_content(1_00_00):
out_file.write(data)
lowerCamelCase = BeautifulSoup(res.text, 'html.parser')
lowerCamelCase = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(F"""https://google.com{link.get("href")}""")
| code_codestyle: 199 |
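
# Style context: appears to be the TensorFlow XGLM model tests from transformers.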
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class A :
UpperCamelCase__ : Union[str, Any] =XGLMConfig
UpperCamelCase__ : Dict ={}
UpperCamelCase__ : Tuple ='gelu'
def __init__( self : List[Any] , lowercase_ : List[str] , lowercase_ : Union[str, Any]=14 , lowercase_ : Dict=7 , lowercase_ : Union[str, Any]=True , lowercase_ : Optional[Any]=True , lowercase_ : Any=True , lowercase_ : Optional[int]=99 , lowercase_ : List[Any]=32 , lowercase_ : List[Any]=2 , lowercase_ : Dict=4 , lowercase_ : List[str]=37 , lowercase_ : int="gelu" , lowercase_ : List[Any]=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : List[str]=512 , lowercase_ : Union[str, Any]=0.02 , ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Dict =parent
_lowerCamelCase : Optional[Any] =batch_size
_lowerCamelCase : Optional[int] =seq_length
_lowerCamelCase : Union[str, Any] =is_training
_lowerCamelCase : Tuple =use_input_mask
_lowerCamelCase : str =use_labels
_lowerCamelCase : Any =vocab_size
_lowerCamelCase : List[str] =d_model
_lowerCamelCase : List[Any] =num_hidden_layers
_lowerCamelCase : Union[str, Any] =num_attention_heads
_lowerCamelCase : List[Any] =ffn_dim
_lowerCamelCase : Optional[Any] =activation_function
_lowerCamelCase : Dict =activation_dropout
_lowerCamelCase : Tuple =attention_dropout
_lowerCamelCase : List[str] =max_position_embeddings
_lowerCamelCase : int =initializer_range
_lowerCamelCase : Optional[int] =None
_lowerCamelCase : Optional[Any] =0
_lowerCamelCase : List[str] =2
_lowerCamelCase : Any =1
def lowerCamelCase ( self : str ) -> int:
"""simple docstring"""
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
def lowerCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] =tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
_lowerCamelCase : Any =None
if self.use_input_mask:
_lowerCamelCase : str =random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Optional[int] =self.get_config()
_lowerCamelCase : Optional[Any] =floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def lowerCamelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=lowercase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=lowercase_ , )
def lowerCamelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
_lowerCamelCase : str =self.prepare_config_and_inputs()
(
(
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) , (
_lowerCamelCase
) ,
) : Any =config_and_inputs
_lowerCamelCase : Union[str, Any] ={
'input_ids': input_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_tf
class A ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
UpperCamelCase__ : Union[str, Any] =(TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
UpperCamelCase__ : List[str] =(TFXGLMForCausalLM,) if is_tf_available() else ()
UpperCamelCase__ : Any =(
{'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {}
)
UpperCamelCase__ : str =False
UpperCamelCase__ : int =False
UpperCamelCase__ : int =False
def lowerCamelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase : Tuple =TFXGLMModelTester(self )
_lowerCamelCase : str =ConfigTester(self , config_class=lowercase_ , n_embd=37 )
def lowerCamelCase ( self : str ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
@slow
def lowerCamelCase ( self : Any ) -> int:
"""simple docstring"""
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : int =TFXGLMModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
def lowerCamelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
super().test_resize_token_embeddings()
@require_tf
class A ( unittest.TestCase ):
@slow
def lowerCamelCase ( self : str , lowercase_ : str=True ) -> Tuple:
"""simple docstring"""
_lowerCamelCase : Any =TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
_lowerCamelCase : List[Any] =tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
_lowerCamelCase : int =[2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581]
# fmt: on
_lowerCamelCase : Dict =model.generate(lowercase_ , do_sample=lowercase_ , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , lowercase_ )
@slow
def lowerCamelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : List[str] =XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
_lowerCamelCase : Any =TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
tf.random.set_seed(0 )
_lowerCamelCase : Tuple =tokenizer('Today is a nice day and' , return_tensors='tf' )
_lowerCamelCase : Optional[int] =tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(':/CPU:0' ):
_lowerCamelCase : List[Any] =model.generate(lowercase_ , do_sample=lowercase_ , seed=[7, 0] )
_lowerCamelCase : Union[str, Any] =tokenizer.decode(output_ids[0] , skip_special_tokens=lowercase_ )
_lowerCamelCase : Union[str, Any] =(
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(lowercase_ , lowercase_ )
@slow
def lowerCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : int =TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
_lowerCamelCase : Any =XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
_lowerCamelCase : Optional[Any] ='left'
# use different length sentences to test batching
_lowerCamelCase : int =[
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
_lowerCamelCase : List[Any] =tokenizer(lowercase_ , return_tensors='tf' , padding=lowercase_ )
_lowerCamelCase : int =inputs['input_ids']
_lowerCamelCase : str =model.generate(input_ids=lowercase_ , attention_mask=inputs['attention_mask'] , max_new_tokens=12 )
_lowerCamelCase : Optional[Any] =tokenizer(sentences[0] , return_tensors='tf' ).input_ids
_lowerCamelCase : List[str] =model.generate(input_ids=lowercase_ , max_new_tokens=12 )
_lowerCamelCase : Tuple =tokenizer(sentences[1] , return_tensors='tf' ).input_ids
_lowerCamelCase : Dict =model.generate(input_ids=lowercase_ , max_new_tokens=12 )
_lowerCamelCase : str =tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
_lowerCamelCase : str =tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase_ )
_lowerCamelCase : int =tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase_ )
_lowerCamelCase : List[str] =[
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(lowercase_ , lowercase_ )
self.assertListEqual(lowercase_ , [non_padded_sentence, padded_sentence] )
| style_context_codestyle: 199 | label: 1 |
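
# Code sample: appears to be the IPNDMScheduler tests from diffusers.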
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : List[Any] = (IPNDMScheduler,)
__magic_name__ : Optional[Any] = (("num_inference_steps", 50),)
def a__( self : Any , **lowerCAmelCase : Tuple )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = {'''num_train_timesteps''': 1000}
config.update(**lowerCAmelCase )
return config
def a__( self : List[Any] , lowerCAmelCase : int=0 , **lowerCAmelCase : Tuple )-> Dict:
"""simple docstring"""
UpperCAmelCase = dict(self.forward_default_kwargs )
UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowerCAmelCase )
UpperCAmelCase = self.dummy_sample
UpperCAmelCase = 0.1 * sample
UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase = self.get_scheduler_config(**lowerCAmelCase )
UpperCAmelCase = scheduler_class(**lowerCAmelCase )
scheduler.set_timesteps(lowerCAmelCase )
# copy over dummy past residuals
UpperCAmelCase = dummy_past_residuals[:]
if time_step is None:
UpperCAmelCase = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCAmelCase )
UpperCAmelCase = scheduler_class.from_pretrained(lowerCAmelCase )
new_scheduler.set_timesteps(lowerCAmelCase )
# copy over dummy past residuals
UpperCAmelCase = dummy_past_residuals[:]
UpperCAmelCase = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
UpperCAmelCase = new_scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
UpperCAmelCase = new_scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def a__( self : Optional[Any] )-> Optional[int]:
"""simple docstring"""
pass
def a__( self : List[Any] , lowerCAmelCase : Dict=0 , **lowerCAmelCase : Any )-> str:
"""simple docstring"""
UpperCAmelCase = dict(self.forward_default_kwargs )
UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowerCAmelCase )
UpperCAmelCase = self.dummy_sample
UpperCAmelCase = 0.1 * sample
UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**lowerCAmelCase )
scheduler.set_timesteps(lowerCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase = dummy_past_residuals[:]
if time_step is None:
UpperCAmelCase = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCAmelCase )
UpperCAmelCase = scheduler_class.from_pretrained(lowerCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase = dummy_past_residuals[:]
UpperCAmelCase = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
UpperCAmelCase = new_scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
UpperCAmelCase = new_scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def a__( self : List[str] , **lowerCAmelCase : Union[str, Any] )-> Dict:
"""simple docstring"""
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config(**lowerCAmelCase )
UpperCAmelCase = scheduler_class(**lowerCAmelCase )
UpperCAmelCase = 10
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase = model(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase = model(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ).prev_sample
return sample
def a__( self : Union[str, Any] )-> int:
"""simple docstring"""
UpperCAmelCase = dict(self.forward_default_kwargs )
UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowerCAmelCase )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**lowerCAmelCase )
UpperCAmelCase = self.dummy_sample
UpperCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(lowerCAmelCase , '''set_timesteps''' ):
scheduler.set_timesteps(lowerCAmelCase )
elif num_inference_steps is not None and not hasattr(lowerCAmelCase , '''set_timesteps''' ):
UpperCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCAmelCase = dummy_past_residuals[:]
UpperCAmelCase = scheduler.timesteps[5]
UpperCAmelCase = scheduler.timesteps[6]
UpperCAmelCase = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
UpperCAmelCase = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
UpperCAmelCase = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
UpperCAmelCase = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def a__( self : Any )-> Tuple:
"""simple docstring"""
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase , time_step=lowerCAmelCase )
def a__( self : int )-> Optional[int]:
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=lowerCAmelCase , time_step=lowerCAmelCase )
def a__( self : Optional[Any] )-> Any:
"""simple docstring"""
UpperCAmelCase = self.full_loop()
UpperCAmelCase = torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_mean.item() - 2540529 ) < 10
| code_codestyle: 91 |
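
# Style context: a small script that prints the truth table of a NOR gate.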
'''simple docstring'''
def lowerCamelCase__ ( A : int , A : int ):
'''simple docstring'''
return int(input_a == input_a == 0 )
def lowerCamelCase__ ( ):
'''simple docstring'''
print('''Truth Table of NOR Gate:''' )
print('''| Input 1 | Input 2 | Output |''' )
print(f"""| 0 | 0 | {nor_gate(0 , 0 )} |""" )
print(f"""| 0 | 1 | {nor_gate(0 , 1 )} |""" )
print(f"""| 1 | 0 | {nor_gate(1 , 0 )} |""" )
print(f"""| 1 | 1 | {nor_gate(1 , 1 )} |""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| style_context_codestyle: 91 | label: 1 |
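
# Code sample: appears to be the BridgeTower configuration classes (vision, text, and combined) from transformers.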
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
'''BridgeTower/bridgetower-base-itm-mlm''': (
'''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
),
}
class A ( __UpperCAmelCase ):
__snake_case = 'bridgetower_vision_model'
def __init__( self, UpperCamelCase__=768, UpperCamelCase__=12, UpperCamelCase__=3, UpperCamelCase__=16, UpperCamelCase__=288, UpperCamelCase__=1, UpperCamelCase__=1E-05, UpperCamelCase__=False, UpperCamelCase__=True, UpperCamelCase__=False, **UpperCamelCase__, ):
"""simple docstring"""
super().__init__(**UpperCamelCase__ )
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = patch_size
lowerCAmelCase_ = image_size
lowerCAmelCase_ = initializer_factor
lowerCAmelCase_ = layer_norm_eps
lowerCAmelCase_ = stop_gradient
lowerCAmelCase_ = share_layernorm
lowerCAmelCase_ = remove_last_layer
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls, UpperCamelCase__, **UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ , lowerCAmelCase_ = cls.get_config_dict(UpperCamelCase__, **UpperCamelCase__ )
if config_dict.get('''model_type''' ) == "bridgetower":
lowerCAmelCase_ = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls, '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(UpperCamelCase__, **UpperCamelCase__ )
class A ( __UpperCAmelCase ):
__snake_case = 'bridgetower_text_model'
def __init__( self, UpperCamelCase__=5_0265, UpperCamelCase__=768, UpperCamelCase__=12, UpperCamelCase__=12, UpperCamelCase__=1, UpperCamelCase__=3072, UpperCamelCase__="gelu", UpperCamelCase__=0.1, UpperCamelCase__=0.1, UpperCamelCase__=514, UpperCamelCase__=1, UpperCamelCase__=1E-05, UpperCamelCase__=1, UpperCamelCase__=0, UpperCamelCase__=2, UpperCamelCase__="absolute", UpperCamelCase__=True, **UpperCamelCase__, ):
"""simple docstring"""
super().__init__(**UpperCamelCase__ )
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = initializer_factor
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = type_vocab_size
lowerCAmelCase_ = layer_norm_eps
lowerCAmelCase_ = position_embedding_type
lowerCAmelCase_ = use_cache
lowerCAmelCase_ = pad_token_id
lowerCAmelCase_ = bos_token_id
lowerCAmelCase_ = eos_token_id
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls, UpperCamelCase__, **UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ , lowerCAmelCase_ = cls.get_config_dict(UpperCamelCase__, **UpperCamelCase__ )
if config_dict.get('''model_type''' ) == "bridgetower":
lowerCAmelCase_ = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls, '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(UpperCamelCase__, **UpperCamelCase__ )
class A ( __UpperCAmelCase ):
__snake_case = 'bridgetower'
def __init__( self, UpperCamelCase__=True, UpperCamelCase__="gelu", UpperCamelCase__=768, UpperCamelCase__=1, UpperCamelCase__=1E-05, UpperCamelCase__=False, UpperCamelCase__="add", UpperCamelCase__=12, UpperCamelCase__=6, UpperCamelCase__=False, UpperCamelCase__=False, UpperCamelCase__=None, UpperCamelCase__=None, **UpperCamelCase__, ):
"""simple docstring"""
lowerCAmelCase_ = kwargs.pop('''text_config_dict''', UpperCamelCase__ )
lowerCAmelCase_ = kwargs.pop('''vision_config_dict''', UpperCamelCase__ )
super().__init__(**UpperCamelCase__ )
lowerCAmelCase_ = share_cross_modal_transformer_layers
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = initializer_factor
lowerCAmelCase_ = layer_norm_eps
lowerCAmelCase_ = share_link_tower_layers
lowerCAmelCase_ = link_tower_type
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = tie_word_embeddings
lowerCAmelCase_ = init_layernorm_from_vision_encoder
if text_config is None:
lowerCAmelCase_ = {}
logger.info('''`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.''' )
if vision_config is None:
lowerCAmelCase_ = {}
logger.info('''`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.''' )
lowerCAmelCase_ = BridgeTowerTextConfig(**UpperCamelCase__ )
lowerCAmelCase_ = BridgeTowerVisionConfig(**UpperCamelCase__ )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls, UpperCamelCase__, UpperCamelCase__, **UpperCamelCase__ ):
"""simple docstring"""
return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = copy.deepcopy(self.__dict__ )
lowerCAmelCase_ = self.text_config.to_dict()
lowerCAmelCase_ = self.vision_config.to_dict()
lowerCAmelCase_ = self.__class__.model_type
return output
| 278 |
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
_A = '''scheduler_config.json'''
class A ( __UpperCAmelCase ):
__snake_case = 1
__snake_case = 2
__snake_case = 3
__snake_case = 4
__snake_case = 5
__snake_case = 6
__snake_case = 7
__snake_case = 8
__snake_case = 9
__snake_case = 10
__snake_case = 11
__snake_case = 12
__snake_case = 13
__snake_case = 14
@dataclass
class A ( __UpperCAmelCase ):
__snake_case = 42
class A :
__snake_case = SCHEDULER_CONFIG_NAME
__snake_case = []
__snake_case = True
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls, UpperCamelCase__ = None, UpperCamelCase__ = None, UpperCamelCase__=False, **UpperCamelCase__, ):
"""simple docstring"""
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = cls.load_config(
pretrained_model_name_or_path=UpperCamelCase__, subfolder=UpperCamelCase__, return_unused_kwargs=UpperCamelCase__, return_commit_hash=UpperCamelCase__, **UpperCamelCase__, )
return cls.from_config(UpperCamelCase__, return_unused_kwargs=UpperCamelCase__, **UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ = False, **UpperCamelCase__ ):
"""simple docstring"""
self.save_config(save_directory=UpperCamelCase__, push_to_hub=UpperCamelCase__, **UpperCamelCase__ )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return self._get_compatibles()
    @classmethod
    def _get_compatibles( cls ):
        """Resolve the class names listed in `cls._compatibles` against the top-level package."""
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles ) )
        diffusers_library = importlib.import_module(__name__.split('''.''' )[0] )
        compatible_classes = [
            getattr(diffusers_library, c ) for c in compatible_classes_str if hasattr(diffusers_library, c )
        ]
        return compatible_classes
| 278 | 1 |
def solution(n: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below `n`."""
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
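# Quick sanity check (illustrative): for n = 10 the multiples of 3 or 5 below 10
# are 3, 5, 6 and 9, so solution(10) == 23.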
if __name__ == "__main__":
print(F'''{solution() = }''')
| 247 |
def hexagonal_numbers(length: int) -> list[int]:
    if not isinstance(length, int) or length <= 0:
        raise ValueError('''Length must be a positive integer.''' )
    return [n * (2 * n - 1) for n in range(length)]
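# Illustrative values: hexagonal_numbers(5) == [0, 1, 6, 15, 28], since the n-th
# hexagonal number is n * (2n - 1).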
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 247 | 1 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue_model_parallelism.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_600, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_600, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
] )
class A ( unittest.TestCase ):
def _A (self ):
if self.framework == "pytorch":
subprocess.run(
f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding='utf-8' , check=UpperCamelCase__ , )
assert hasattr(self , 'env' )
def _A (self , lowerCAmelCase ):
__lowercase= {
'enabled': True,
'processes_per_host': 8,
}
__lowercase= {
'enabled': True,
'parameters': {
'microbatches': 4,
'placement_strategy': 'spread',
'pipeline': 'interleaved',
'optimize': 'speed',
'partitions': 4,
'ddp': True,
},
}
__lowercase= {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options}
__lowercase= 'trainer' if self.script == 'run_glue.py' else 'smtrainer'
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f'{self.env.base_job_name}-{instance_count}-smp-{name_extension}' , instance_count=UpperCamelCase__ , instance_type=self.instance_type , debugger_hook_config=UpperCamelCase__ , hyperparameters={
**self.env.hyperparameters,
'model_name_or_path': self.model_name_or_path,
'max_steps': 5_0_0,
} , metric_definitions=self.env.metric_definitions , distribution=UpperCamelCase__ , py_version='py36' , )
def _A (self , lowerCAmelCase ):
TrainingJobAnalytics(UpperCamelCase__ ).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv' )
@parameterized.expand([(1,)] )
def _A (self , lowerCAmelCase ):
__lowercase= self.create_estimator(UpperCamelCase__ )
# run training
estimator.fit()
# result dataframe
__lowercase= TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__lowercase= list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
__lowercase= list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__lowercase= (
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 9_9_9_9_9_9 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'{estimator.latest_training_job.name}.json' , 'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , UpperCamelCase__ )
| 295 |
'''simple docstring'''
_lowerCamelCase : int = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 28 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : List[str] = '''▁'''
SCREAMING_SNAKE_CASE : int = {'''vocab_file''': '''sentencepiece.bpe.model'''}
SCREAMING_SNAKE_CASE : Optional[int] = {
'''vocab_file''': {
'''facebook/mbart-large-50-one-to-many-mmt''': (
'''https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model'''
),
}
}
SCREAMING_SNAKE_CASE : str = {
'''facebook/mbart-large-50-one-to-many-mmt''': 1_0_2_4,
}
# fmt: off
SCREAMING_SNAKE_CASE : Tuple = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''', '''af_ZA''', '''az_AZ''', '''bn_IN''', '''fa_IR''', '''he_IL''', '''hr_HR''', '''id_ID''', '''ka_GE''', '''km_KH''', '''mk_MK''', '''ml_IN''', '''mn_MN''', '''mr_IN''', '''pl_PL''', '''ps_AF''', '''pt_XX''', '''sv_SE''', '''sw_KE''', '''ta_IN''', '''te_IN''', '''th_TH''', '''tl_XX''', '''uk_UA''', '''ur_PK''', '''xh_ZA''', '''gl_ES''', '''sl_SI''']
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = ['input_ids', 'attention_mask']
__UpperCamelCase = []
__UpperCamelCase = []
def __init__(self , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="</s>" , lowerCamelCase="</s>" , lowerCamelCase="<s>" , lowerCamelCase="<unk>" , lowerCamelCase="<pad>" , lowerCamelCase="<mask>" , lowerCamelCase = None , **lowerCamelCase , ):
'''simple docstring'''
_lowerCAmelCase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token
_lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCAmelCase = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=lowerCamelCase , tgt_lang=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase , )
_lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase ) )
_lowerCAmelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
_lowerCAmelCase = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_lowerCAmelCase = 1
_lowerCAmelCase = len(self.sp_model )
_lowerCAmelCase = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCamelCase )
}
_lowerCAmelCase = {v: k for k, v in self.lang_code_to_id.items()}
_lowerCAmelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
_lowerCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_lowerCAmelCase = src_lang if src_lang is not None else """en_XX"""
_lowerCAmelCase = self.lang_code_to_id[self._src_lang]
_lowerCAmelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def A__ (self ):
'''simple docstring'''
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def A__ (self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def A__ (self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__(self ):
'''simple docstring'''
_lowerCAmelCase = self.__dict__.copy()
_lowerCAmelCase = None
return state
def __setstate__(self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase = {}
_lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = {self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def A__ (self , lowerCamelCase ):
'''simple docstring'''
return self.sp_model.encode(lowerCamelCase , out_type=lowerCamelCase )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_lowerCAmelCase = self.sp_model.PieceToId(lowerCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def A__ (self , lowerCamelCase ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = []
_lowerCAmelCase = """"""
_lowerCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCamelCase ) + token
_lowerCAmelCase = True
_lowerCAmelCase = []
else:
current_sub_tokens.append(lowerCamelCase )
_lowerCAmelCase = False
out_string += self.sp_model.decode(lowerCamelCase )
return out_string.strip()
def A__ (self , lowerCamelCase , lowerCamelCase = None ):
'''simple docstring'''
if not os.path.isdir(lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase = os.path.join(
lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase , """wb""" ) as fi:
_lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase )
return (out_vocab_file,)
def A__ (self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase )
_lowerCAmelCase = [1] * len(self.prefix_tokens )
_lowerCAmelCase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCamelCase )) + suffix_ones
return prefix_ones + ([0] * len(lowerCamelCase )) + ([0] * len(lowerCamelCase )) + suffix_ones
def A__ (self , lowerCamelCase , lowerCamelCase = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
_lowerCAmelCase = src_lang
_lowerCAmelCase = self(lowerCamelCase , add_special_tokens=lowerCamelCase , return_tensors=lowerCamelCase , **lowerCamelCase )
_lowerCAmelCase = self.convert_tokens_to_ids(lowerCamelCase )
_lowerCAmelCase = tgt_lang_id
return inputs
def A__ (self , lowerCamelCase , lowerCamelCase = "en_XX" , lowerCamelCase = None , lowerCamelCase = "ro_RO" , **lowerCamelCase , ):
'''simple docstring'''
_lowerCAmelCase = src_lang
_lowerCAmelCase = tgt_lang
return super().prepare_seqaseq_batch(lowerCamelCase , lowerCamelCase , **lowerCamelCase )
def A__ (self ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def A__ (self ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.lang_code_to_id[src_lang]
_lowerCAmelCase = [self.cur_lang_code_id]
_lowerCAmelCase = [self.eos_token_id]
def A__ (self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = self.lang_code_to_id[tgt_lang]
_lowerCAmelCase = [self.cur_lang_code_id]
        _lowerCAmelCase = [self.eos_token_id]
| 356 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE : List[Any] = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 317 | 0 |
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
def list_field(default=None, metadata=None) -> List:
    """Shorthand for a dataclass field whose default is a mutable list."""
    return field(default_factory=lambda: default , metadata=metadata )
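# Minimal usage sketch (the field names below are hypothetical, not part of this file):
#   @dataclass
#   class ExampleArgs:
#       models: List[str] = list_field(default=["bert-base-cased"], metadata={"help": "..."})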
@dataclass
class __SCREAMING_SNAKE_CASE:
_UpperCAmelCase = list_field(
default=[] , metadata={
"help": (
"Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
" of all available models"
)
} , )
_UpperCAmelCase = list_field(
default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} )
_UpperCAmelCase = list_field(
default=[8, 3_2, 1_2_8, 5_1_2] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , )
_UpperCAmelCase = field(
default=a_ , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , )
_UpperCAmelCase = field(
default=a_ , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , )
_UpperCAmelCase = field(
default=a_ , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} )
_UpperCAmelCase = field(default=a_ , metadata={"help": "Use FP16 to accelerate inference."} )
_UpperCAmelCase = field(default=a_ , metadata={"help": "Benchmark training of model"} )
_UpperCAmelCase = field(default=a_ , metadata={"help": "Verbose memory tracing"} )
_UpperCAmelCase = field(
default=a_ , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , )
_UpperCAmelCase = field(
default=a_ , metadata={
"help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
} , )
_UpperCAmelCase = field(default=a_ , metadata={"help": "Trace memory line by line"} )
_UpperCAmelCase = field(default=a_ , metadata={"help": "Save result to a CSV file"} )
_UpperCAmelCase = field(default=a_ , metadata={"help": "Save all print statements in a log file"} )
_UpperCAmelCase = field(default=a_ , metadata={"help": "Whether to print environment information"} )
_UpperCAmelCase = field(
default=a_ , metadata={
"help": (
"Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
" for debugging / testing and on TPU."
)
} , )
_UpperCAmelCase = field(
default=F'''inference_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv."} , )
_UpperCAmelCase = field(
default=F'''inference_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv."} , )
_UpperCAmelCase = field(
default=F'''train_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv for training."} , )
_UpperCAmelCase = field(
default=F'''train_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv for training."} , )
_UpperCAmelCase = field(
default=F'''env_info_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving environment information."} , )
_UpperCAmelCase = field(
default=F'''log_{round(time() )}.csv''' , metadata={"help": "Log filename used if print statements are saved in log."} , )
_UpperCAmelCase = field(default=3 , metadata={"help": "Times an experiment will be run."} )
_UpperCAmelCase = field(
default=a_ , metadata={
"help": (
"Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
" model weights."
)
} , )
def lowerCAmelCase_ ( self: Tuple ) -> str:
warnings.warn(
F'''The class {self.__class__} is deprecated. Hugging Face Benchmarking utils'''
' are deprecated in general and it is advised to use external Benchmarking libraries '
' to benchmark Transformer models.' , UpperCamelCase , )
def lowerCAmelCase_ ( self: Any ) -> List[str]:
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def lowerCAmelCase_ ( self: Dict ) -> List[str]:
if len(self.models ) <= 0:
raise ValueError(
'Please make sure you provide at least one model name / model identifier, *e.g.* `--models'
' bert-base-cased` or `args.models = [\'bert-base-cased\'].' )
return self.models
@property
def lowerCAmelCase_ ( self: List[str] ) -> Optional[Any]:
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('Multiprocessing is currently not possible on TPU.' )
return False
else:
return True
| 307 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__UpperCamelCase : Any = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]}
if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
import sys
__UpperCamelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 307 | 1 |
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase : List[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( _snake_case , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = XLNetTokenizer
__UpperCamelCase = XLNetTokenizerFast
__UpperCamelCase = True
__UpperCamelCase = True
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
A_ : Dict = XLNetTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
A_ : Tuple = "<s>"
A_ : Optional[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "<eod>" )
self.assertEqual(len(UpperCamelCase__ ) , 1_006 )
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : Dict = XLNetTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ )
A_ : Dict = tokenizer.tokenize("This is a test" )
self.assertListEqual(UpperCamelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [285, 46, 10, 170, 382] )
A_ : int = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
A_ : Tuple = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
A_ : Optional[int] = tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
A_ : str = XLNetTokenizer(UpperCamelCase__ , do_lower_case=UpperCamelCase__ )
A_ : Dict = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + "",
"i",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
] , )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["▁he", "ll", "o"] )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : List[str] = XLNetTokenizer(UpperCamelCase__ , do_lower_case=UpperCamelCase__ )
A_ : Union[str, Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
] , )
@slow
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : str = XLNetTokenizer.from_pretrained("xlnet-base-cased" )
A_ : int = tokenizer.encode("sequence builders" , add_special_tokens=UpperCamelCase__ )
A_ : Any = tokenizer.encode("multi-sequence build" , add_special_tokens=UpperCamelCase__ )
A_ : int = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ )
A_ : Any = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__ )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
A_ : Union[str, Any] = {"input_ids": [[17, 21_442, 270, 17, 10, 14_645, 318, 34, 17, 4_546, 3_145, 787, 13, 7_752, 22_018, 23, 21, 17, 4_546, 3_145, 787, 13, 3_352, 14_431, 13, 5_500, 11, 1_176, 580, 13, 16_819, 4_797, 23, 17, 10, 17_135, 658, 19, 457, 7_932, 13, 184, 19, 3_154, 17_135, 6_468, 19, 1_404, 12_269, 19, 4_229, 5_356, 16_264, 46, 19, 17, 20_545, 10_395, 9, 9, 9, 11, 28, 6_421, 9_531, 20_729, 17, 10, 353, 17_022, 11, 21, 6_421, 9_531, 16_949, 17, 10, 11_509, 753, 11, 33, 95, 2_421, 7_385, 956, 14_431, 2_626, 25, 842, 7_385, 4_836, 21, 1_429, 2_272, 9_855, 3_120, 161, 24_738, 19, 13_203, 658, 218, 787, 21, 430, 18_482, 847, 2_637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22_178, 27, 1_064, 22, 956, 13, 11_101, 1_429, 5_854, 24_313, 18_953, 40, 422, 24_366, 68, 1_758, 37, 10_483, 14_257, 31, 207, 263, 21, 203, 3_773, 25, 71, 9_735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2_049, 3_442, 17, 13_894, 3_380, 23, 95, 18, 17_634, 2_288, 9, 4, 3]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name="xlnet-base-cased" , revision="c841166438c31ec7ca9a106dee7bb312b73ae511" , )
| 362 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase : Union[str, Any] = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : str = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 70 | 0 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    '''Score `item` by counting characters that match `main_target` at the same position.'''
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
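# Illustrative: evaluate("Helxo Worlx", "Hello World") returns ("Helxo Worlx", 9.0),
# because nine of the eleven characters already sit in the right position.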
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    '''Slice both parents at a random point and swap the tails.'''
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
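# Illustrative: if random_slice happened to be 2, crossover("123456", "abcdef")
# would yield ("12cdef", "ab3456").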
def mutate(child: str, genes: list[str]) -> str:
    '''Mutate one random gene of `child` with probability MUTATION_PROBABILITY.'''
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str], ) -> list[str]:
    '''Pick second parents for one selected parent and generate new population members.'''
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
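# Note: each selected parent spawns at most 10 crossovers (two mutated children
# each), so a single evolution step can add up to 20 strings per parent.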
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    '''Run the evolution until the target string is reproduced exactly.'''
    if N_POPULATION < N_SELECTED:
        msg = f'{N_POPULATION} must be bigger than {N_SELECTED}'
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f'{not_in_genes_list} is not in genes list, evolution cannot converge'
        raise ValueError(msg)
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))
    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
        total_population += len(population)
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f'\nGeneration: {generation}'
f'\nTotal Population:{total_population}'
f'\nBest score: {population_score[0][1]}'
f'\nBest string: {population_score[0][0]}' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
        if len(population) > N_POPULATION:
break
if __name__ == "__main__":
    target_str = (
        'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'
    )
    genes_list = list(
        ' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'
        'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'
    )
    generation, population, target = basic(target_str, genes_list)
print(
f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
| 12 |
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    """Post `message_body` to a Slack incoming-webhook URL."""
    headers = {'''Content-Type''': '''application/json'''}
    response = requests.post(slack_url, json={'''text''': message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            '''Request to slack returned an error '''
            F"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
| 210 | 0 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = (1 + _cos) / 2
    b1 = -1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a band-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = _sin / 2
    b1 = 0
    b2 = -b0
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates an all-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha
    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt
def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2), ) -> IIRFilter:
    """Creates a peak filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2), ) -> IIRFilter:
    """Creates a low-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha
    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2), ) -> IIRFilter:
    """Creates a high-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha
    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
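# Minimal usage sketch (assumes IIRFilter exposes a per-sample `process` method,
# as in the reference audio_filters.iir_filter implementation):
#   filt = make_lowpass(1_000, 48_000)
#   filtered = [filt.process(sample) for sample in samples]  # `samples`: iterable of floats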
| 356 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase__ : Optional[Any] = logging.get_logger(__name__)
lowercase__ : str = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class a__ ( UpperCamelCase__ ):
a : Optional[int] = """table-transformer"""
a : Tuple = ["""past_key_values"""]
a : int = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , A=True , A=None , A=3 , A=100 , A=6 , A=2048 , A=8 , A=6 , A=2048 , A=8 , A=0.0 , A=0.0 , A=True , A="relu" , A=256 , A=0.1 , A=0.0 , A=0.0 , A=0.0_2 , A=1.0 , A=False , A="sine" , A="resnet50" , A=True , A=False , A=1 , A=5 , A=2 , A=1 , A=1 , A=5 , A=2 , A=0.1 , **A , ) -> List[str]:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
a = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(A , A ):
a = backbone_config.get("model_type" )
a = CONFIG_MAPPING[backbone_model_type]
a = config_class.from_dict(A )
# set timm attributes to None
a , a , a = None, None, None
a = use_timm_backbone
a = backbone_config
a = num_channels
a = num_queries
a = d_model
a = encoder_ffn_dim
a = encoder_layers
a = encoder_attention_heads
a = decoder_ffn_dim
a = decoder_layers
a = decoder_attention_heads
a = dropout
a = attention_dropout
a = activation_dropout
a = activation_function
a = init_std
a = init_xavier_std
a = encoder_layerdrop
a = decoder_layerdrop
a = encoder_layers
a = auxiliary_loss
a = position_embedding_type
a = backbone
a = use_pretrained_backbone
a = dilation
# Hungarian matcher
a = class_cost
a = bbox_cost
a = giou_cost
# Loss coefficients
a = mask_loss_coefficient
a = dice_loss_coefficient
a = bbox_loss_coefficient
a = giou_loss_coefficient
a = eos_coefficient
super().__init__(is_encoder_decoder=A , **A )
@property
def lowerCAmelCase_ ( self ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def lowerCAmelCase_ ( self ) -> int:
'''simple docstring'''
return self.d_model
class a__ ( UpperCamelCase__ ):
a : Any = version.parse("""1.11""" )
@property
def lowerCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def lowerCAmelCase_ ( self ) -> float:
'''simple docstring'''
return 1e-5
@property
def lowerCAmelCase_ ( self ) -> int:
'''simple docstring'''
return 12
| 180 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 68 |
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ = get_tests_dir("""fixtures/test_sentencepiece_bpe_char.model""")
@require_sentencepiece
@require_tokenizers
class a__ ( snake_case , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = SpeechTaTokenizer
__lowerCamelCase = False
__lowerCamelCase = True
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
A__ = SpeechTaTokenizer(lowercase )
A__ = AddedToken("<mask>" , lstrip=lowercase , rstrip=lowercase )
A__ = mask_token
tokenizer.add_special_tokens({"mask_token": mask_token} )
tokenizer.add_tokens(["<ctc_blank>"] )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self , lowercase ) -> Union[str, Any]:
'''simple docstring'''
A__ = "this is a test"
A__ = "this is a test"
return input_text, output_text
def UpperCamelCase ( self , lowercase , lowercase=False , lowercase=20 , lowercase=5 ) -> Optional[Any]:
'''simple docstring'''
A__ , A__ = self.get_input_output_texts(lowercase )
A__ = tokenizer.encode(lowercase , add_special_tokens=lowercase )
A__ = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase )
return text, ids
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
A__ = "<pad>"
A__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase ) , lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase ) , lowercase )
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
A__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-4] , "œ" )
self.assertEqual(vocab_keys[-2] , "<mask>" )
self.assertEqual(vocab_keys[-1] , "<ctc_blank>" )
self.assertEqual(len(lowercase ) , 81 )
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def UpperCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
A__ = self.get_tokenizers(do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
A__ = tokenizer.vocab_size
A__ = len(lowercase )
self.assertNotEqual(lowercase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
A__ = ["aaaaa bbbbbb", "cccccccccdddddddd"]
A__ = tokenizer.add_tokens(lowercase )
A__ = tokenizer.vocab_size
A__ = len(lowercase )
self.assertNotEqual(lowercase , 0 )
self.assertEqual(lowercase , lowercase )
self.assertEqual(lowercase , len(lowercase ) )
self.assertEqual(lowercase , all_size + len(lowercase ) )
A__ = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=lowercase )
self.assertGreaterEqual(len(lowercase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
A__ = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
A__ = tokenizer.add_special_tokens(lowercase )
A__ = tokenizer.vocab_size
A__ = len(lowercase )
self.assertNotEqual(lowercase , 0 )
self.assertEqual(lowercase , lowercase )
self.assertEqual(lowercase , len(lowercase ) )
self.assertEqual(lowercase , all_size_a + len(lowercase ) )
A__ = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=lowercase )
self.assertGreaterEqual(len(lowercase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
pass
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
pass

    def test_tokenization(self) -> None:
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"])
        # fmt: on
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."])

        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."])

    @slow
    def test_tokenizer_integration(self) -> None:
        sequences = [
"Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
"general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
"Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
"models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
"conditioning on both left and right context in all layers.",
"The quick brown fox jumps over the lazy dog.",
]
# fmt: off
        expected_encoding = {
"input_ids": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
| 68 | 1 |
"""simple docstring"""
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
    """,
)
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor) -> None:
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor) -> None:
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
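

# Illustrative usage sketch for the pipeline above; the checkpoint name is just an
# example, any fill-mask model works:
#
#   from transformers import pipeline
#
#   fill_mask = pipeline("fill-mask", model="distilroberta-base")
#   fill_mask("Paris is the <mask> of France.", top_k=3)
#
# Each prediction is a dict with "score", "token", "token_str" and "sequence";
# passing targets=["capital"] restricts scoring to that token.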
| 234 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
"""MBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MBartForCausalLM""",
"""MBartForConditionalGeneration""",
"""MBartForQuestionAnswering""",
"""MBartForSequenceClassification""",
"""MBartModel""",
"""MBartPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
"""TFMBartForConditionalGeneration""",
"""TFMBartModel""",
"""TFMBartPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
"""FlaxMBartForConditionalGeneration""",
"""FlaxMBartForQuestionAnswering""",
"""FlaxMBartForSequenceClassification""",
"""FlaxMBartModel""",
"""FlaxMBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
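
# With the lazy structure above, importing this package stays cheap: submodules are
# only materialized on first attribute access. Illustrative, using names from this file:
#
#   from transformers.models.mbart import MBartConfig  # loads configuration_mbart only
#   from transformers.models.mbart import MBartModel   # triggers the torch modeling file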
| 234 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}
class AlbertConfig(PretrainedConfig):
    """Configuration class to store the configuration of an ALBERT model."""

    model_type = "albert"

    def __init__(
        self, vocab_size=30000, embedding_size=128, hidden_size=4096, num_hidden_layers=12, num_hidden_groups=1,
        num_attention_heads=64, intermediate_size=16384, inner_group_num=1, hidden_act="gelu_new",
        hidden_dropout_prob=0, attention_probs_dropout_prob=0, max_position_embeddings=512, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-12, classifier_dropout_prob=0.1,
        position_embedding_type="absolute", pad_token_id=0, bos_token_id=2, eos_token_id=3, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type

class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
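
# Illustrative usage sketch: build a config and inspect the ONNX input axes.
#
#   config = AlbertConfig(hidden_size=768, num_attention_heads=12)
#   onnx_config = AlbertOnnxConfig(config, task="default")
#   print(onnx_config.inputs)  # OrderedDict mapping each input to its dynamic axes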
| 11 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            torchscript=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            fp16=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            fp16=True,
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory_line_by_line(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
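
# Illustrative standalone sketch of the API exercised by the tests above:
#
#   args = PyTorchBenchmarkArguments(
#       models=["sshleifer/tiny-gpt2"], training=False, inference=True,
#       sequence_lengths=[8], batch_sizes=[1], multi_process=False,
#   )
#   results = PyTorchBenchmark(args).run()
#   print(results.time_inference_result)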
| 11 | 1 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(">=", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)


def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)

    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}

            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")


def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)


def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")


def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
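

# Illustrative call sequence; assumes an Accelerate `accelerator` whose state carries
# a configured FSDP plugin (`accelerator.state.fsdp_plugin`):
#
#   plugin = accelerator.state.fsdp_plugin
#   save_fsdp_model(plugin, accelerator, model, "ckpt")
#   save_fsdp_optimizer(plugin, accelerator, optimizer, model, "ckpt")
#   ...
#   load_fsdp_model(plugin, accelerator, model, "ckpt")
#   load_fsdp_optimizer(plugin, accelerator, optimizer, model, "ckpt")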
| 351 |
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """Deduplicate entries in the model doc table of content and sort them alphabetically by title."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
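

# Example (illustrative) of the dedup-and-sort behaviour:
#
#   clean_model_doc_toc([
#       {"local": "model_doc/bert", "title": "BERT"},
#       {"local": "model_doc/albert", "title": "ALBERT"},
#       {"local": "model_doc/bert", "title": "BERT"},
#   ])
#   # -> [{"local": "model_doc/albert", "title": "ALBERT"},
#   #     {"local": "model_doc/bert", "title": "BERT"}]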


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                modality_doc["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 92 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/switch-base-8""": """https://huggingface.co/google/switch-base-8/blob/main/config.json""",
}


class SwitchTransformersConfig(PretrainedConfig):
    """Configuration class to store the configuration of a SwitchTransformers model."""

    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layers we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layers we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
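

# Illustrative usage sketch (values are just examples):
#
#   config = SwitchTransformersConfig(num_layers=12, num_sparse_encoder_layers=3)
#   config.encoder_sparse_step  # -> 4: every 4th encoder layer is a sparse MoE layer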
| 185 |
from numpy import exp, pi, sqrt


def gaussian(x: float, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Evaluate the Gaussian (normal) probability density function at ``x``."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
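

# Quick sanity check (illustrative): the standard normal density peaks at x == mu
# with value 1 / sqrt(2 * pi), so gaussian(0.0) evaluates to roughly 0.3989.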
if __name__ == "__main__":
import doctest
doctest.testmod()
| 185 | 1 |
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
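

# Illustrative standalone sketch mirroring the integration test above:
#
#   unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
#   pipe = PNDMPipeline(unet=unet, scheduler=PNDMScheduler()).to("cuda")
#   image = pipe(num_inference_steps=50).images[0]  # a PIL image by default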
| 350 |
def harmonic_series(n_term: str) -> list:
    """Build the first ``n_term`` terms of the harmonic series as strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
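

# Example (illustrative): harmonic_series("3") -> ["1", "1/2", "1/3"]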
if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''')
print(harmonic_series(nth_term))
| 281 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False

    def setUp(self) -> None:
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def canine_tokenizer(self):
        return CanineTokenizer.from_pretrained("google/canine-s")

    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        # shrink the unicode vocab so the common tests run quickly
        tokenizer._unicode_vocab_size = 1024
        return tokenizer

    @require_torch
    def test_prepare_batch_integration(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
        # fmt: off
        expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)

        result = list(batch.input_ids.numpy()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 39), batch.input_ids.shape)
        self.assertEqual((2, 39), batch.attention_mask.shape)

    @require_torch
    def test_encoding_keys(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Once there was a man.", "He wrote a test in HuggingFace Transformers."]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertIn("token_type_ids", batch)

    @require_torch
    def test_max_length_integration(self):
        tokenizer = self.canine_tokenizer
        tgt_text = [
            "What's the weather?",
            "It's about 25 degrees.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors="pt"
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"

                additional_special_tokens = tokenizer.additional_special_tokens

                # We can add a new special token for Canine as follows:
                new_additional_special_token = chr(0xE007)
                additional_special_tokens.append(new_additional_special_token)
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn(new_additional_special_token, after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, ids = self.get_clean_sequence(tokenizer)

                # a special token for Canine can be defined as follows:
                SPECIAL_TOKEN = 0xE005
                special_token = chr(SPECIAL_TOKEN)

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                text = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=False)
                encoded = tokenizer.encode(text, add_special_tokens=False)

                input_encoded = tokenizer.encode(input_text, add_special_tokens=False)
                special_token_id = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(encoded, input_encoded + special_token_id)

                decoded = tokenizer.decode(encoded, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_tokenize_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                SPECIAL_TOKEN_1 = chr(0xE005)
                SPECIAL_TOKEN_2 = chr(0xE006)

                # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
                tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=True)
                # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
                # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
                tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]})

                token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1)
                token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2)

                self.assertEqual(len(token_1), 1)
                self.assertEqual(len(token_2), 1)
                self.assertEqual(token_1[0], SPECIAL_TOKEN_1)
                self.assertEqual(token_2[0], SPECIAL_TOKEN_2)
    @require_tokenizers
    def test_added_token_serializable(self) -> None:
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token = AddedToken(chr(NEW_TOKEN), lstrip=True)
                tokenizer.add_special_tokens({"additional_special_tokens": [new_token]})
                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(tmp_dir_name)
                    tokenizer.from_pretrained(tmp_dir_name)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self) -> None:
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)
                # a special token for Canine can be defined as follows:
                new_token_a = chr(0xE006)
                special_tokens_map["additional_special_tokens"] = [new_token_a]
                tokenizer_config["additional_special_tokens"] = [new_token_a]
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir, extra_ids=0)
                self.assertIn(new_token_a, tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_a],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a])
                    ),
                )
                new_token_b = chr(0xE007)
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = [AddedToken(new_token_b, lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, extra_ids=0
                )
                self.assertIn(new_token_b, tokenizer.additional_special_tokens)
                # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_b], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_b]))
                )
    @require_tokenizers
    def test_encode_decode_with_spaces(self) -> None:
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input = "hello world"
                if self.space_between_special_tokens:
                    output = "[CLS] hello world [SEP]"
                else:
                    output = input
                encoded = tokenizer.encode(input, add_special_tokens=False)
                decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
                self.assertIn(decoded, [output, output.lower()])
    def test_special_token_setters(self) -> None:
        # Method name restored descriptively (the original mangled name is unrecoverable);
        # the body checks that special-token ids can be set and unset freely on Canine.
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]
                token_to_test_setters = "a"
                token_id_to_test_setters = ord(token_to_test_setters)
                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)
                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)
                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])
                additional_special_token_id = 0xE006
                additional_special_token = chr(additional_special_token_id)
                setattr(tokenizer, "additional_special_tokens_ids", [additional_special_token_id])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [additional_special_token])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [additional_special_token_id])
def __magic_name__ ( self : str ) -> Dict:
pass
def __magic_name__ ( self : List[Any] ) -> List[Any]:
pass
def __magic_name__ ( self : Any ) -> int:
pass
def __magic_name__ ( self : Union[str, Any] ) -> Union[str, Any]:
pass
def __magic_name__ ( self : List[Any] ) -> Optional[int]:
pass
def __magic_name__ ( self : Tuple ) -> Optional[Any]:
pass
def __magic_name__ ( self : Dict ) -> Dict:
pass
def __magic_name__ ( self : List[str] ) -> Dict:
pass | 152 | 0 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
def __init__( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int]=14 , SCREAMING_SNAKE_CASE__ : Any=7 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Any=False , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=99 , SCREAMING_SNAKE_CASE__ : Dict=32 , SCREAMING_SNAKE_CASE__ : Optional[Any]=4 , SCREAMING_SNAKE_CASE__ : Optional[Any]=4 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=4 , SCREAMING_SNAKE_CASE__ : str=37 , SCREAMING_SNAKE_CASE__ : Optional[int]="gelu" , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : List[Any]=5_12 , SCREAMING_SNAKE_CASE__ : Any=0.02 , ) -> Optional[int]:
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = rotary_dim
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = initializer_range
__lowerCamelCase = None
__lowerCamelCase = vocab_size - 1
__lowerCamelCase = vocab_size - 1
__lowerCamelCase = vocab_size - 1
def __A ( self : int ) -> Tuple:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=SCREAMING_SNAKE_CASE__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def __A ( self : List[str] ) -> Any:
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs
__lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def __A ( self : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> int:
__lowerCamelCase = 20
__lowerCamelCase = model_class_name(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = model.init_cache(input_ids.shape[0] , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
__lowerCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCamelCase = model(
input_ids[:, :-1] , attention_mask=SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ , position_ids=SCREAMING_SNAKE_CASE__ , )
__lowerCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
__lowerCamelCase = model(
input_ids[:, -1:] , attention_mask=SCREAMING_SNAKE_CASE__ , past_key_values=outputs_cache.past_key_values , position_ids=SCREAMING_SNAKE_CASE__ , )
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
def __A ( self : str , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[int]:
__lowerCamelCase = 20
__lowerCamelCase = model_class_name(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
__lowerCamelCase = model.init_cache(input_ids.shape[0] , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCamelCase = model(
input_ids[:, :-1] , attention_mask=SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ , position_ids=SCREAMING_SNAKE_CASE__ , )
__lowerCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
__lowerCamelCase = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=SCREAMING_SNAKE_CASE__ , position_ids=SCREAMING_SNAKE_CASE__ , )
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)
def __A ( self : Union[str, Any] ) -> Any:
for model_class_name in self.all_model_classes:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __A ( self : List[str] ) -> Optional[Any]:
for model_class_name in self.all_model_classes:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)
        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id
        jit_generate = jax.jit(model.generate)
        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences
        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)
        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]
        self.assertListEqual(output_string, expected_string)
@is_pt_flax_cross_test
def __A ( self : Dict ) -> List[str]:
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCamelCase = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCamelCase = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase , __lowerCamelCase = pt_inputs['''input_ids'''].shape
__lowerCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = pt_model_class(SCREAMING_SNAKE_CASE__ ).eval()
__lowerCamelCase = model_class(SCREAMING_SNAKE_CASE__ , dtype=jnp.floataa )
__lowerCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = fx_state
with torch.no_grad():
__lowerCamelCase = pt_model(**SCREAMING_SNAKE_CASE__ ).to_tuple()
__lowerCamelCase = fx_model(**SCREAMING_SNAKE_CASE__ ).to_tuple()
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = model_class.from_pretrained(SCREAMING_SNAKE_CASE__ , from_pt=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = fx_model_loaded(**SCREAMING_SNAKE_CASE__ ).to_tuple()
self.assertEqual(
len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
def __A ( self : str ) -> Optional[Any]:
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCamelCase = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCamelCase = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = pt_model_class(SCREAMING_SNAKE_CASE__ ).eval()
__lowerCamelCase = model_class(SCREAMING_SNAKE_CASE__ , dtype=jnp.floataa )
__lowerCamelCase = load_flax_weights_in_pytorch_model(SCREAMING_SNAKE_CASE__ , fx_model.params )
__lowerCamelCase , __lowerCamelCase = pt_inputs['''input_ids'''].shape
__lowerCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase = 0
__lowerCamelCase = 1
__lowerCamelCase = 0
__lowerCamelCase = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__lowerCamelCase = pt_model(**SCREAMING_SNAKE_CASE__ ).to_tuple()
__lowerCamelCase = fx_model(**SCREAMING_SNAKE_CASE__ ).to_tuple()
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = pt_model_class.from_pretrained(SCREAMING_SNAKE_CASE__ , from_flax=SCREAMING_SNAKE_CASE__ )
with torch.no_grad():
__lowerCamelCase = pt_model_loaded(**SCREAMING_SNAKE_CASE__ ).to_tuple()
self.assertEqual(
len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
def __A ( self : Tuple ) -> Union[str, Any]:
for model_class_name in self.all_model_classes:
__lowerCamelCase = model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''' )
__lowerCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
| 360 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar

KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    """Tombstone marker left in a bucket after a deletion."""

    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Open-addressing hash map with linear probing and automatic resizing."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:  # empty slot or tombstone
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
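# Minimal usage sketch (added illustration, not part of the original module):
# it exercises insertion, lookup, deletion, and the automatic resizing above.
if __name__ == "__main__":
    hash_map: HashMap[int, int] = HashMap(initial_block_size=8)
    for key in range(20):
        hash_map[key] = key * key  # filling past 75% occupancy triggers _size_up()
    assert len(hash_map) == 20 and hash_map[7] == 49
    del hash_map[7]  # the slot is tombstoned with _deleted, not cleared
    assert 7 not in hash_map
    print(hash_map)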
| 339 | 0 |
"""
Project Euler problem 74: count how many starting numbers below one million
produce a factorial digit chain with exactly sixty non-repeating terms.
"""
from math import factorial

DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    """
    Sum the factorials of the digits of ``number``.

    >>> digit_factorial_sum(145)
    145
    """
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")
    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
    """Count starts below ``number_limit`` whose chain has exactly
    ``chain_length`` non-repeating terms."""
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            "Parameters chain_length and number_limit must be greater than 0")
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}
    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating
        # item, or the length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
| 80 |
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
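# Added illustration (not in the original file): on a 500x1000-pixel page, the
# pixel box [50, 100, 150, 200] maps onto the 0-1000 coordinate scale like so:
#   normalize_box([50, 100, 150, 200], width=500, height=1000)
#   -> [100, 100, 300, 200]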
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    tesseract_config = tesseract_config if tesseract_config is not None else ""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))
    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class LayoutLMv2ImageProcessor(BaseImageProcessor):
    """Image processor for document models: resizes page images, flips channels
    to BGR (as Detectron2 expects), and optionally runs Tesseract OCR to
    extract words plus normalized bounding boxes."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        apply_ocr: Optional[bool] = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
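# Minimal usage sketch (added; assumes Pillow is installed and passes
# apply_ocr=False to avoid the pytesseract dependency; "document.png" is a
# placeholder path, not a real file):
#
#     from PIL import Image
#     processor = LayoutLMv2ImageProcessor(apply_ocr=False)
#     encoding = processor(Image.open("document.png").convert("RGB"), return_tensors="np")
#     encoding["pixel_values"].shape  # (1, 3, 224, 224), channels flipped to BGR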
| 128 | 0 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
def __init__( self , A_ , A_=13 , A_=64 , A_=3 , A_=4 , A_=[2, 2, 2, 2] , A_=[8, 4, 2, 1] , A_=[16, 32, 64, 128] , A_=[1, 4, 8, 16] , A_=[1, 2, 4, 8] , A_=True , A_=True , A_="gelu" , A_=0.1 , A_=0.1 , A_=0.02 , A_=3 , A_=None , ):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = parent
_UpperCAmelCase : Optional[Any] = batch_size
_UpperCAmelCase : str = image_size
_UpperCAmelCase : Dict = num_channels
_UpperCAmelCase : Any = num_encoder_blocks
_UpperCAmelCase : int = sr_ratios
_UpperCAmelCase : Dict = depths
_UpperCAmelCase : int = hidden_sizes
_UpperCAmelCase : List[Any] = downsampling_rates
_UpperCAmelCase : int = num_attention_heads
_UpperCAmelCase : Dict = is_training
_UpperCAmelCase : List[Any] = use_labels
_UpperCAmelCase : int = hidden_act
_UpperCAmelCase : List[Any] = hidden_dropout_prob
_UpperCAmelCase : Dict = attention_probs_dropout_prob
_UpperCAmelCase : Union[str, Any] = initializer_range
_UpperCAmelCase : Union[str, Any] = num_labels
_UpperCAmelCase : Optional[Any] = scope
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase : str = None
if self.use_labels:
_UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_UpperCAmelCase : List[str] = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ):
'''simple docstring'''
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , A_ , A_ , A_ ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = SegformerModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCAmelCase : Dict = model(lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def _UpperCAmelCase ( self , A_ , A_ , A_ ):
'''simple docstring'''
_UpperCAmelCase : Dict = self.num_labels
_UpperCAmelCase : Optional[int] = SegformerForSemanticSegmentation(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCAmelCase : Tuple = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
_UpperCAmelCase : Tuple = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def _UpperCAmelCase ( self , A_ , A_ , A_ ):
'''simple docstring'''
_UpperCAmelCase : str = 1
_UpperCAmelCase : Dict = SegformerForSemanticSegmentation(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCAmelCase : Tuple = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertGreater(result.loss , 0.0 )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
_UpperCAmelCase : Any = config_and_inputs
_UpperCAmelCase : Tuple = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    # The flag names below follow the common test mixin; their order matches
    # the True/False/False/False values in the original file.
    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)
def _UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*lowerCAmelCase__ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*lowerCAmelCase__ )
@unittest.skip("SegFormer does not use inputs_embeds" )
def _UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods" )
def _UpperCAmelCase ( self ):
'''simple docstring'''
pass
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : int = model_class(lowerCAmelCase__ )
_UpperCAmelCase : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : List[str] = [*signature.parameters.keys()]
_UpperCAmelCase : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : Optional[Any] = True
for model_class in self.all_model_classes:
_UpperCAmelCase : str = True
_UpperCAmelCase : Any = False
_UpperCAmelCase : List[str] = True
_UpperCAmelCase : Dict = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
_UpperCAmelCase : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
_UpperCAmelCase : Tuple = outputs.attentions
_UpperCAmelCase : str = sum(self.model_tester.depths )
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_UpperCAmelCase : int = True
_UpperCAmelCase : Optional[int] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
_UpperCAmelCase : Tuple = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
_UpperCAmelCase : str = outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
# verify the first attentions (first block, first layer)
_UpperCAmelCase : List[Any] = (self.model_tester.image_size // 4) ** 2
_UpperCAmelCase : Union[str, Any] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
_UpperCAmelCase : Any = (self.model_tester.image_size // 32) ** 2
_UpperCAmelCase : Union[str, Any] = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
_UpperCAmelCase : Dict = len(lowerCAmelCase__ )
# Check attention is always last and order is fine
_UpperCAmelCase : Optional[Any] = True
_UpperCAmelCase : Optional[int] = True
_UpperCAmelCase : str = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
_UpperCAmelCase : Tuple = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
self.assertEqual(out_len + 1 , len(lowerCAmelCase__ ) )
_UpperCAmelCase : Dict = outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
# verify the first attentions (first block, first layer)
_UpperCAmelCase : Dict = (self.model_tester.image_size // 4) ** 2
_UpperCAmelCase : List[str] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def _UpperCAmelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(A_ , A_ , A_ ):
_UpperCAmelCase : List[str] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
_UpperCAmelCase : Tuple = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
_UpperCAmelCase : str = outputs.hidden_states
_UpperCAmelCase : int = self.model_tester.num_encoder_blocks
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
_UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Dict = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase : Optional[Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
_UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : Optional[Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(lowerCAmelCase__ ):
continue
_UpperCAmelCase : Optional[int] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.train()
_UpperCAmelCase : Dict = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
_UpperCAmelCase : Tuple = model(**lowerCAmelCase__ ).loss
loss.backward()
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _UpperCAmelCase ( self ):
'''simple docstring'''
pass
@slow
def _UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : str = SegformerModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
@slow
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=lowerCAmelCase__ , align=lowerCAmelCase__ , do_random_crop=lowerCAmelCase__ )
_UpperCAmelCase : Union[str, Any] = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = prepare_img()
_UpperCAmelCase : List[Any] = image_processor(images=lowerCAmelCase__ , return_tensors="pt" )
_UpperCAmelCase : List[str] = encoded_inputs.pixel_values.to(lowerCAmelCase__ )
with torch.no_grad():
_UpperCAmelCase : Dict = model(lowerCAmelCase__ )
_UpperCAmelCase : Union[str, Any] = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
_UpperCAmelCase : Tuple = torch.tensor(
[
[[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]],
[[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]],
[[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]],
] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , lowerCAmelCase__ , atol=1e-4 ) )
@slow
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=lowerCAmelCase__ , align=lowerCAmelCase__ , do_random_crop=lowerCAmelCase__ )
_UpperCAmelCase : int = SegformerForSemanticSegmentation.from_pretrained(
"nvidia/segformer-b1-finetuned-cityscapes-1024-1024" ).to(lowerCAmelCase__ )
_UpperCAmelCase : Dict = prepare_img()
_UpperCAmelCase : Optional[Any] = image_processor(images=lowerCAmelCase__ , return_tensors="pt" )
_UpperCAmelCase : List[str] = encoded_inputs.pixel_values.to(lowerCAmelCase__ )
with torch.no_grad():
_UpperCAmelCase : int = model(lowerCAmelCase__ )
_UpperCAmelCase : Dict = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
_UpperCAmelCase : Union[str, Any] = torch.tensor(
[
[[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]],
[[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]],
[[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]],
] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , lowerCAmelCase__ , atol=1e-1 ) )
@slow
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=lowerCAmelCase__ , align=lowerCAmelCase__ , do_random_crop=lowerCAmelCase__ )
_UpperCAmelCase : Any = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
lowerCAmelCase__ )
_UpperCAmelCase : List[str] = prepare_img()
_UpperCAmelCase : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors="pt" )
_UpperCAmelCase : Any = encoded_inputs.pixel_values.to(lowerCAmelCase__ )
with torch.no_grad():
_UpperCAmelCase : Tuple = model(lowerCAmelCase__ )
_UpperCAmelCase : Any = outputs.logits.detach().cpu()
_UpperCAmelCase : Any = image_processor.post_process_semantic_segmentation(outputs=lowerCAmelCase__ , target_sizes=[(500, 300)] )
_UpperCAmelCase : Dict = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , lowerCAmelCase__ )
_UpperCAmelCase : int = image_processor.post_process_semantic_segmentation(outputs=lowerCAmelCase__ )
_UpperCAmelCase : Any = torch.Size((128, 128) )
self.assertEqual(segmentation[0].shape , lowerCAmelCase__ )
| 367 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)
        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        audio = np.ones([12000])
        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        images = np.ones([3, 224, 224])
        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        audio = np.ones([12000])
        images = np.ones([3, 224, 224])
        inputs = processor(audio=audio, images=images)
        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
| 189 | 0 |
"""simple docstring"""
UpperCamelCase : Any = {
0: "0",
1: "1",
2: "2",
3: "3",
4: "4",
5: "5",
6: "6",
7: "7",
8: "8",
9: "9",
1_0: "a",
1_1: "b",
1_2: "c",
1_3: "d",
1_4: "e",
1_5: "f",
}
def A ( snake_case :float ) -> str:
assert type(snake_case ) in (int, float) and decimal == int(snake_case )
__UpperCamelCase = int(snake_case )
__UpperCamelCase = ''
__UpperCamelCase = False
if decimal < 0:
__UpperCamelCase = True
decimal *= -1
while decimal > 0:
__UpperCamelCase , __UpperCamelCase = divmod(snake_case , 1_6 )
__UpperCamelCase = values[remainder] + hexadecimal
__UpperCamelCase = '0x' + hexadecimal
if negative:
__UpperCamelCase = '-' + hexadecimal
return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
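# Added worked trace (illustrative): for 255, divmod(255, 16) -> (15, 15), so
# "f" becomes the least-significant digit; divmod(15, 16) -> (0, 15) prepends a
# second "f" and the loop stops, yielding "0xff" (matching Python's hex(255)).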
| 316 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
lowercase = 42
lowercase = 42
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
@torch.no_grad()
def __call__( self , __UpperCAmelCase = 1 , __UpperCAmelCase = 2000 , __UpperCAmelCase = None , __UpperCAmelCase = "pil" , __UpperCAmelCase = True , **__UpperCAmelCase , ):
'''simple docstring'''
__UpperCamelCase = self.unet.config.sample_size
__UpperCamelCase = (batch_size, 3, img_size, img_size)
__UpperCamelCase = self.unet
__UpperCamelCase = randn_tensor(__UpperCAmelCase , generator=__UpperCAmelCase ) * self.scheduler.init_noise_sigma
__UpperCamelCase = sample.to(self.device )
self.scheduler.set_timesteps(__UpperCAmelCase )
self.scheduler.set_sigmas(__UpperCAmelCase )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__UpperCamelCase = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
__UpperCamelCase = self.unet(__UpperCAmelCase , __UpperCAmelCase ).sample
__UpperCamelCase = self.scheduler.step_correct(__UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample
# prediction step
__UpperCamelCase = model(__UpperCAmelCase , __UpperCAmelCase ).sample
__UpperCamelCase = self.scheduler.step_pred(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase )
__UpperCamelCase , __UpperCamelCase = output.prev_sample, output.prev_sample_mean
__UpperCamelCase = sample_mean.clamp(0 , 1 )
__UpperCamelCase = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__UpperCamelCase = self.numpy_to_pil(__UpperCAmelCase )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=__UpperCAmelCase )
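# Minimal usage sketch (added; "google/ncsnpp-church-256" is given as an
# example checkpoint id for this pipeline, assumed rather than taken from this
# file):
#
#     pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#     image = pipe(num_inference_steps=2000).images[0]
#     image.save("sde_ve_sample.png")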
| 316 | 1 |
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
__A = logging.get_logger("transformers.models.speecht5")
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
hf_model.apply_weight_norm()
__lowerCAmelCase: str = checkpoint["input_conv.weight_g"]
__lowerCAmelCase: List[str] = checkpoint["input_conv.weight_v"]
__lowerCAmelCase: List[Any] = checkpoint["input_conv.bias"]
for i in range(len(config.upsample_rates ) ):
__lowerCAmelCase: int = checkpoint[F"upsamples.{i}.1.weight_g"]
__lowerCAmelCase: int = checkpoint[F"upsamples.{i}.1.weight_v"]
__lowerCAmelCase: Optional[Any] = checkpoint[F"upsamples.{i}.1.bias"]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
__lowerCAmelCase: Optional[Any] = checkpoint[F"blocks.{i}.convs1.{j}.1.weight_g"]
__lowerCAmelCase: Any = checkpoint[F"blocks.{i}.convs1.{j}.1.weight_v"]
__lowerCAmelCase: Union[str, Any] = checkpoint[F"blocks.{i}.convs1.{j}.1.bias"]
__lowerCAmelCase: int = checkpoint[F"blocks.{i}.convs2.{j}.1.weight_g"]
__lowerCAmelCase: List[Any] = checkpoint[F"blocks.{i}.convs2.{j}.1.weight_v"]
__lowerCAmelCase: Tuple = checkpoint[F"blocks.{i}.convs2.{j}.1.bias"]
__lowerCAmelCase: Any = checkpoint["output_conv.1.weight_g"]
__lowerCAmelCase: str = checkpoint["output_conv.1.weight_v"]
__lowerCAmelCase: Dict = checkpoint["output_conv.1.bias"]
hf_model.remove_weight_norm()
@torch.no_grad()
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , ) -> Tuple:
if config_path is not None:
__lowerCAmelCase: Optional[int] = SpeechTaHifiGanConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase: str = SpeechTaHifiGanConfig()
__lowerCAmelCase: Optional[int] = SpeechTaHifiGan(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Optional[Any] = torch.load(__SCREAMING_SNAKE_CASE )
load_weights(orig_checkpoint["model"]["generator"] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__lowerCAmelCase: str = np.load(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Optional[Any] = stats[0].reshape(-1 )
__lowerCAmelCase: Optional[int] = stats[1].reshape(-1 )
__lowerCAmelCase: str = torch.from_numpy(__SCREAMING_SNAKE_CASE ).float()
__lowerCAmelCase: Optional[int] = torch.from_numpy(__SCREAMING_SNAKE_CASE ).float()
model.save_pretrained(__SCREAMING_SNAKE_CASE )
if repo_id:
print("Pushing to the hub..." )
model.push_to_hub(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
__A = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
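# Example invocation (added; all file paths below are placeholders):
#
#   python convert_hifigan_checkpoint.py \
#       --checkpoint_path ./hifigan_generator.ckpt.pt \
#       --stats_path ./stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan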
| 108 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], do_convert_rgb=True):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Prepare a batch of test images as PIL images, numpy arrays or torch tensors."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        # kept as a no-op; the original test name is not recoverable from this dump
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        # kept as a no-op; the original test name is not recoverable from this dump
        pass

    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 108 | 1 |
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Return the sum of the maximum contiguous subarray (Kadane's algorithm)."""
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum
if __name__ == "__main__":
    from doctest import testmod

    testmod()

    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
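    # Quick sanity check of the empty-subarray flag (illustrative, not part of the
    # original file): with an all-negative input the best non-empty subarray is the
    # largest single element, while an empty subarray is allowed to sum to 0.
    assert max_subarray_sum([-3, -1, -2]) == -1
    assert max_subarray_sum([-3, -1, -2], allow_empty_subarrays=True) == 0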
| 82 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
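# Usage sketch (argument values are illustrative, not defaults from the file above):
# a 24-step-ahead univariate forecaster using the "prob" (ProbSparse) attention
# that gives Informer its sub-quadratic attention cost.
#
#   config = InformerConfig(prediction_length=24, context_length=48, input_size=1)
#   assert config.attention_type == "prob"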
| 201 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """Directed graph with transition probabilities, for running a Markov chain."""

    def __init__(self):
        self.connections = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()

        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
def get_transitions(start, transitions, steps):
    """Run the chain for `steps` transitions from `start` and count node visits.

    The original function name was lost in this dump; `get_transitions` is used
    here as a conventional name for the helper.
    """
    graph = MarkovChainGraphUndirectedUnweighted()

    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)

    visited = Counter(graph.get_nodes())
    node = start

    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1

    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
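# Illustrative run (the chain below is invented for the demo): a two-state chain
# that strongly prefers staying in "a". After many steps the visit counts should
# roughly follow the stationary distribution, about 5:1 in favour of "a".
#
#   transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
#   print(get_transitions("a", transitions, 5000))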
| 355 |
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints

from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}

    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Convert the VQA variant.")
    args = parser.parse_args()

    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
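# Illustrative invocation (the checkpoint path is a made-up placeholder):
#
#   python convert_pix2struct_checkpoint.py \
#       --t5x_checkpoint_path ./t5x_checkpoint \
#       --pytorch_dump_folder_path ./pix2struct-base \
#       --use_large   # only for the large variant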
| 93 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
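# Note on the pattern above: at runtime the module is swapped for a `_LazyModule`
# built from `_import_structure`, so `from transformers import RemBertModel` does
# not pull in torch or TensorFlow until the attribute is actually accessed; the
# real imports only run under TYPE_CHECKING, so static type checkers still see them.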
| 335 |
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ):
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
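# Usage sketch (argument values are illustrative): generation arguments are passed
# straight through `**gen_kwargs` and fall back to `args.generation_max_length` /
# `args.generation_num_beams` when omitted.
#
#   metrics = trainer.evaluate(max_length=64, num_beams=4, metric_key_prefix="eval")
#   predictions = trainer.predict(predict_dataset, predict_examples)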
| 335 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(self, vocab_size=29056, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16,
                 intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
                 pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
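# Usage sketch (sizes are illustrative, not the defaults above):
#
#   config = MegatronBertConfig(num_hidden_layers=12, hidden_size=768, num_attention_heads=12)
#   config.save_pretrained("./megatron-bert-small")  # round-trips through config.json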
| 354 |
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")

        input_ids = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 56 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
'GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTBigCodeForSequenceClassification',
'GPTBigCodeForTokenClassification',
'GPTBigCodeForCausalLM',
'GPTBigCodeModel',
'GPTBigCodePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 56 |
'''simple docstring'''
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


# `set_sqlalchemy_silence_uber_warning` below is assumed to be a conftest fixture of
# the upstream test suite (it silences SQLAlchemy deprecation warnings); the original
# fixture name was lost in this dump.
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, tmp_path, sqlite_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)


@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, tmp_path, sqlite_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row


@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
| 56 | 1 |
"""simple docstring"""
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert a camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert a snake-case string to camel-case."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
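# Worked examples of the helpers above (outputs written out by hand):
#
#   camelcase_to_snakecase("MyDataset")             -> "my_dataset"
#   filename_prefix_for_split("MyDataset", "train") -> "my_dataset-train"
#   filenames_for_dataset_split(
#       "/data", "MyDataset", "train", filetype_suffix="arrow", shard_lengths=[10, 10]
#   )
#   -> ["/data/my_dataset-train-00000-of-00002.arrow",
#       "/data/my_dataset-train-00001-of-00002.arrow"]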
| 168 |
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_proceesor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()

            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")
    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(tmp_dir)
                new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_image_processor, CustomImageProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
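

if __name__ == "__main__":
    # A minimal sketch of the registration pattern exercised above, using a hypothetical
    # MyConfig/MyImageProcessor pair; the names and bodies below are assumptions for
    # illustration, not part of the test suite.
    from transformers import PretrainedConfig
    from transformers.image_processing_utils import BaseImageProcessor

    class MyConfig(PretrainedConfig):
        model_type = "my-model"  # the key the auto classes dispatch on

    class MyImageProcessor(BaseImageProcessor):
        pass

    # Register the pair once; afterwards the Auto API resolves it like any built-in processor.
    AutoConfig.register("my-model", MyConfig)
    AutoImageProcessor.register(MyConfig, MyImageProcessor)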
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process, always running the shortest ready job first."""
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to the burst time of each process.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # While processes are not completed:
    # a process whose arrival time has passed and which has remaining execution
    # time is put into ready_process; the shortest process in ready_process,
    # target_process, is executed to completion.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turnaround time is burst time plus waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
    print("[TEST CASE 01]")

    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )

    # Printing the Result
    print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
            f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
        )
    print(f"\nAverage waiting time = {mean(waiting_time):.5f}")
    print(f"Average turnaround time = {mean(turn_around_time):.5f}")
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
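

# A minimal sketch of the lazy-import pattern above, under the assumption that deferring the
# heavy submodule import is the goal: attribute access on the module triggers the real import,
# so importing the package stays cheap. Illustrative only; `_LazyModule` is the real helper.
import importlib
import types


class TinyLazyModule(types.ModuleType):
    """Defers submodule imports until an attribute is first accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each public attribute to the submodule that defines it.
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        try:
            module_name = self._attr_to_module[attr]
        except KeyError:
            raise AttributeError(attr) from None
        module = importlib.import_module("." + module_name, self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so the import runs only once
        return value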
'''simple docstring'''
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
'EncodecConfig': ['overlap'],
# used as `self.bert_model = BertModel(config, ...)`
'DPRConfig': True,
# not used in modeling files, but it's an important information
'FSMTConfig': ['langs'],
# used internally in the configuration class file
'GPTNeoConfig': ['attention_types'],
# used internally in the configuration class file
'EsmConfig': ['is_folding_model'],
# used during training (despite we don't have training script for these models yet)
'Mask2FormerConfig': ['ignore_value'],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'OneFormerConfig': ['ignore_value', 'norm'],
# used during preprocessing and collation, see `collating_graphormer.py`
'GraphormerConfig': ['spatial_pos_max'],
# used internally in the configuration class file
'T5Config': ['feed_forward_proj'],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
# used internally in the configuration class file
'LongT5Config': ['feed_forward_proj'],
# used internally in the configuration class file
'SwitchTransformersConfig': ['feed_forward_proj'],
# having default values other than `1e-5` - we can't fix them without breaking
'BioGptConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'GLPNConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'SegformerConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'CvtConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'PerceiverConfig': ['layer_norm_eps'],
# used internally to calculate the feature size
'InformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate `mlp_dim`
'SamVisionConfig': ['mlp_ratio'],
# For (head) training, but so far not implemented
'ClapAudioConfig': ['num_classes'],
# Not used, but providing useful information to users
'SpeechT5HifiGanConfig': ['sampling_rate'],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'CLIPSegConfig': True,
'DeformableDetrConfig': True,
'DetaConfig': True,
'DinatConfig': True,
'DonutSwinConfig': True,
'EfficientFormerConfig': True,
'FSMTConfig': True,
'JukeboxConfig': True,
'LayoutLMv2Config': True,
'MaskFormerSwinConfig': True,
'MT5Config': True,
'NatConfig': True,
'OneFormerConfig': True,
'PerceiverConfig': True,
'RagConfig': True,
'SpeechT5Config': True,
'SwinConfig': True,
'Swin2SRConfig': True,
'Swinv2Config': True,
'SwitchTransformersConfig': True,
'TableTransformerConfig': True,
'TapasConfig': True,
'TransfoXLConfig': True,
'UniSpeechConfig': True,
'UniSpeechSatConfig': True,
'WavLMConfig': True,
'WhisperConfig': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'JukeboxPriorConfig': True,
# TODO: @Younes (for `is_decoder`)
'Pix2StructTextConfig': True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Check whether any variant name in `attributes` is used in one of the modeling sources."""
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    """Return the sorted list of `__init__` attributes of `config_class` never used in the modeling files."""
    # Get the parameters in `__init__` of the configuration class, and the default values if any
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    """Check that the arguments in `__init__` of all configuration classes are used in the modeling files."""
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)
if __name__ == "__main__":
    check_config_attributes()
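
    # A small standalone check of the multi-line `getattr` pattern used above: the regex
    # should match even when the call is wrapped across lines (the sample string is made up).
    sample = 'value = getattr(\n    self.config, "hidden_size", None\n)'
    pattern = r'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"hidden_size"'
    assert re.search(pattern, sample) is not None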
'''simple docstring'''
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    """Solve a square maze of 0s (open) and 1s (blocked), printing the path if one exists."""
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Depth-first backtracking step: try to extend the path from cell (i, j)."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # mark the cell as visited
            solutions[i][j] = 1

            # check all four directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False
if __name__ == "__main__":
import doctest
    doctest.testmod()
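
    # A tiny worked example (not in the original): 0 is an open cell, 1 is a wall; the solver
    # should thread the single open corridor from the top-left to the bottom-right corner.
    example_maze = [
        [0, 1, 0],
        [0, 1, 0],
        [0, 0, 0],
    ]
    assert solve_maze(example_maze)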
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from math import factorial
def solution(num: int = 100) -> int:
    """Return the sum of the digits of num! (Project Euler problem 20)."""
    return sum(int(digit) for digit in str(factorial(num)))
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
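
    # A quick hand-check (not in the original): 10! = 3628800, whose digits sum to
    # 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27; the well-known Project Euler answer for 100! is 648.
    assert solution(10) == 27
    assert solution(100) == 648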
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetEmbeddings(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class RegNetShortCut(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class RegNetSELayer(nn.Module):
    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        # b c h w -> b c 1 1
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
class RegNetXLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetYLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetStage(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


class RegNetPreTrainedModel(PreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
REGNET_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
REGNET_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
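

if __name__ == "__main__":
    # A hedged smoke test (not part of the original file): build a tiny, randomly initialized
    # RegNet and run a dummy batch through it. The config values below are illustrative
    # assumptions, not a released checkpoint.
    config = RegNetConfig(num_labels=10, hidden_sizes=[32, 64, 160, 384], depths=[1, 1, 2, 2])
    model = RegNetForImageClassification(config)
    pixel_values = torch.randn(1, 3, 224, 224)
    outputs = model(pixel_values)
    print(outputs.logits.shape)  # expected: torch.Size([1, 10])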
"""simple docstring"""
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return the word's letters in sorted order, e.g. signature("test") == "estt"."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every word sharing the given word's signature."""
    return word_by_signature[signature(my_word)]
data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}

    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
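
    # A quick illustration (easy to verify by hand): "pots", "stop" and "spot" all sort to
    # the signature "opst", so they land in the same bucket and are mutual anagrams.
    assert signature("stop") == signature("pots") == "opst"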
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "unc-nlp/lxmert-base-uncased": (
            "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
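

if __name__ == "__main__":
    # A small worked example of the token-type layout built above, reproduced outside the
    # class (101/102 stand in for the conventional [CLS]/[SEP] ids; an assumption here):
    #   [CLS] A A [SEP] B [SEP]  ->  segment ids  0 0 0 0 1 1
    cls, sep = [101], [102]
    token_ids_0, token_ids_1 = [5, 6], [7]
    mask = len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    assert mask == [0, 0, 0, 0, 1, 1]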
"""simple docstring"""
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()
metric = load("accuracy")
def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)
class CustomCallback(TrainerCallback):
    def __init__(self, trainer):
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        # Also evaluate on the training split each epoch, without losing pending control flags.
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy
def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
main()
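
# A tiny runnable illustration of the ClassLabel mapping used in `tokenize` (the label names
# here are made up): str2int turns a string label into the integer id the model is trained on.
_example_labels = ClassLabel(num_classes=3, names=["constant", "linear", "quadratic"])
assert _example_labels.str2int("linear") == 1
assert _example_labels.int2str(1) == "linear"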
"""simple docstring"""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))
def init_hf_modules():
    """Create the cache directory for modules with an init, and add it to the Python path."""
    # This function has already been executed if HF_MODULES_CACHE is already in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name: Union[str, os.PathLike]):
    """Create a dynamic module in the cache directory for modules."""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    """Get the list of modules that are relatively imported in `module_file`."""
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
def get_relative_import_files(module_file):
    """Recursively collect every file that `module_file` depends on through relative imports."""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports
def check_imports(filename):
    """Check that the current Python environment contains all the libraries imported in a file."""
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    """Import a module from the cache of dynamic modules and extract a class from it."""
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    """Retrieve the pipeline class in `loaded_module` that inherits from `DiffusionPipeline`."""
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    """Download a module file, cache it inside the dynamic modules cache, and return its path."""
    # Download and cache module_file from the repo `pretrained_model_name_or_path`, or grab it if it's a local file.
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)
    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative imports
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Extract a class from a module file, present in the local folder or repository of a model."""
    # And lastly we get the class inside our newly created module
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
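

if __name__ == "__main__":
    # A self-contained check of the relative-import scanner above (the file contents are made
    # up for illustration): only the `from .xxx import` lines should be picked up.
    import tempfile

    with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as tmp:
        tmp.write("import os\nfrom .pipeline_utils import something\nfrom .helpers import util\n")
        tmp_name = tmp.name

    assert sorted(get_relative_imports(tmp_name)) == ["helpers", "pipeline_utils"]
    os.remove(tmp_name)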
"""simple docstring"""
def _A (__a , __a ) -> Union[str, Any]:
"""simple docstring"""
return int((input_a, input_a).count(0 ) != 0 )
def _A () -> str:
"""simple docstring"""
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
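

# NAND is functionally complete, so the other gates fall out of it; a quick sketch
# (not in the original): NOT(a) = NAND(a, a) and AND(a, b) = NOT(NAND(a, b)).
def not_gate(a: int) -> int:
    return nand_gate(a, a)


def and_gate(a: int, b: int) -> int:
    return not_gate(nand_gate(a, b))


assert [and_gate(a, b) for a in (0, 1) for b in (0, 1)] == [0, 0, 0, 1]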
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
        )

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )

    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)

        embeddings = emb(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )

        tf.debugging.assert_near(embeddings, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emb = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb([2, 16, 512])
        weights = emb.weight[:3, :5]

        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)


@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        desired_query_layer = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ]
        )
        desired_key_layer = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ]
        )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], desired_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], desired_key_layer, atol=self.tolerance)
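

# For intuition only (not part of the original test file): the rotary update that
# apply_rotary_position_embeddings performs pairs up adjacent feature dimensions
# and rotates each pair by a position-dependent angle. A minimal NumPy sketch of
# the same idea, with hypothetical names:
#
#   import numpy as np
#
#   def rotary_sketch(x, sin, cos):
#       x1, x2 = x[..., 0::2], x[..., 1::2]   # even/odd feature pairs
#       out = np.empty_like(x)
#       out[..., 0::2] = x1 * cos - x2 * sin  # rotate each pair by the angle
#       out[..., 1::2] = x1 * sin + x2 * cos  # encoded per position in sin/cos
#       return out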
| 259 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
lowerCAmelCase_ = TypeVar('T')
class __A ( Generic[T] ):
'''simple docstring'''
def __init__( self : List[Any] ,_snake_case : T ) -> Tuple:
"""simple docstring"""
lowercase__ : Tuple = data
lowercase__ : Node[T] | None = None
def __str__( self : List[Any] ) -> str:
"""simple docstring"""
return f"""{self.data}"""
class __A ( Generic[T] ):
'''simple docstring'''
def __init__( self : int ) -> None:
"""simple docstring"""
lowercase__ : Node[T] | None = None
def __iter__( self : Tuple ) -> Iterator[T]:
"""simple docstring"""
lowercase__ : Tuple = self.top
while node:
yield node.data
lowercase__ : Optional[Any] = node.next
def __str__( self : int ) -> str:
"""simple docstring"""
return "->".join([str(_snake_case ) for item in self] )
def __len__( self : Dict ) -> int:
"""simple docstring"""
return len(tuple(iter(self ) ) )
def UpperCAmelCase ( self : List[str] ) -> bool:
"""simple docstring"""
return self.top is None
def UpperCAmelCase ( self : int ,_snake_case : T ) -> None:
"""simple docstring"""
lowercase__ : Optional[int] = Node(_snake_case )
if not self.is_empty():
lowercase__ : Tuple = self.top
lowercase__ : Union[str, Any] = node
def UpperCAmelCase ( self : List[str] ) -> T:
"""simple docstring"""
if self.is_empty():
raise IndexError('''pop from empty stack''' )
assert isinstance(self.top ,_snake_case )
lowercase__ : List[str] = self.top
lowercase__ : Union[str, Any] = self.top.next
return pop_node.data
def UpperCAmelCase ( self : Dict ) -> T:
"""simple docstring"""
if self.is_empty():
raise IndexError('''peek from empty stack''' )
assert self.top is not None
return self.top.data
def UpperCAmelCase ( self : Tuple ) -> None:
"""simple docstring"""
lowercase__ : Any = None
if __name__ == "__main__":
from doctest import testmod
testmod()
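

# Example session (illustrative):
#
#   stack: Stack[int] = Stack()
#   stack.push(1)
#   stack.push(2)
#   assert str(stack) == "2->1"
#   assert stack.peek() == 2 and len(stack) == 2
#   assert stack.pop() == 2 and stack.pop() == 1
#   assert stack.is_empty()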
| 302 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : UNetaDModel
lowerCAmelCase : ScoreSdeVeScheduler
def __init__( self : Optional[Any] ,_snake_case : UNetaDModel ,_snake_case : ScoreSdeVeScheduler ) -> str:
"""simple docstring"""
super().__init__()
self.register_modules(unet=_snake_case ,scheduler=_snake_case )
@torch.no_grad()
def __call__( self : Any ,_snake_case : int = 1 ,_snake_case : int = 2_000 ,_snake_case : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,_snake_case : Optional[str] = "pil" ,_snake_case : bool = True ,**_snake_case : Any ,) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
lowercase__ : Optional[Any] = self.unet.config.sample_size
lowercase__ : Dict = (batch_size, 3, img_size, img_size)
lowercase__ : Tuple = self.unet
lowercase__ : Any = randn_tensor(_snake_case ,generator=_snake_case ) * self.scheduler.init_noise_sigma
lowercase__ : Union[str, Any] = sample.to(self.device )
self.scheduler.set_timesteps(_snake_case )
self.scheduler.set_sigmas(_snake_case )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowercase__ : Tuple = self.scheduler.sigmas[i] * torch.ones(shape[0] ,device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
lowercase__ : List[str] = self.unet(_snake_case ,_snake_case ).sample
lowercase__ : Optional[Any] = self.scheduler.step_correct(_snake_case ,_snake_case ,generator=_snake_case ).prev_sample
# prediction step
lowercase__ : str = model(_snake_case ,_snake_case ).sample
lowercase__ : List[Any] = self.scheduler.step_pred(_snake_case ,_snake_case ,_snake_case ,generator=_snake_case )
lowercase__ , lowercase__ : Optional[int] = output.prev_sample, output.prev_sample_mean
lowercase__ : Union[str, Any] = sample_mean.clamp(0 ,1 )
lowercase__ : int = sample.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
lowercase__ : Any = self.numpy_to_pil(_snake_case )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=_snake_case )
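

# Usage sketch (illustrative, not part of the module): assuming a score-based
# checkpoint such as "google/ncsnpp-celebahq-256" is available, the pipeline can
# be driven end to end like this:
#
#   from diffusers import ScoreSdeVePipeline
#
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
#   image = pipe(num_inference_steps=2000).images[0]
#   image.save("sde_ve_sample.png")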
| 302 | 1 |
import unittest
from knapsack import knapsack as k
class TestKnapsack(unittest.TestCase):
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
unittest.main()
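
# Worked check for the last case above: with capacity 50, taking the items of
# weight 20 and 30 yields 100 + 120 = 220, which beats any combination that
# includes the weight-10 item (60 + 100 = 160 or 60 + 120 = 180), hence 220.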
| 253 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}


class BertGenerationTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
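

# Usage sketch (illustrative): round-tripping text through the tokenizer using
# the checkpoint referenced in the vocabulary map above:
#
#   tokenizer = BertGenerationTokenizer.from_pretrained(
#       "google/bert_for_seq_generation_L-24_bbc_encoder"
#   )
#   ids = tokenizer("Hello world").input_ids
#   text = tokenizer.decode(ids)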
| 253 | 1 |
"""Odd-even transposition sort run with one process per list element."""
from multiprocessing import Lock, Pipe, Process

# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
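
# Note: oe_process runs a hard-coded 10 comparison phases, matching the
# 10-element demo list built in main(); n phases guarantee that an n-element
# list is sorted. Quick check (illustrative):
#
#   assert odd_even_transposition(list(range(10, 0, -1))) == list(range(1, 11))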
| 351 |
"""Download a WMT dataset and save it in the train.source/train.target layout used by the seq2seq scripts."""
from pathlib import Path

import fire
from tqdm import tqdm


def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split

        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")

        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")

        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
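
# Example invocations via python-fire (assuming the script is saved as
# download_wmt.py; the flags mirror the function arguments):
#
#   python download_wmt.py                                   # wmt16 ro-en -> ./wmt16-ro-en
#   python download_wmt.py --src_lang de --tgt_lang en --dataset wmt19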
| 98 | 0 |
"""simple docstring"""
from ... import PretrainedConfig
_UpperCamelCase : List[str] = {
'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}
class a ( a_ ):
UpperCAmelCase_ : int =NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
UpperCAmelCase_ : List[str] ="nezha"
def __init__( self , _lowerCamelCase=2_1_1_2_8 , _lowerCamelCase=7_6_8 , _lowerCamelCase=1_2 , _lowerCamelCase=1_2 , _lowerCamelCase=3_0_7_2 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=5_1_2 , _lowerCamelCase=6_4 , _lowerCamelCase=2 , _lowerCamelCase=0.0_2 , _lowerCamelCase=1e-12 , _lowerCamelCase=0.1 , _lowerCamelCase=0 , _lowerCamelCase=2 , _lowerCamelCase=3 , _lowerCamelCase=True , **_lowerCamelCase , ):
super().__init__(pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase )
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = hidden_act
lowercase = intermediate_size
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = max_relative_position
lowercase = type_vocab_size
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = classifier_dropout
lowercase = use_cache
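

# Usage sketch (illustrative): a randomly initialised model can be built
# directly from the default configuration:
#
#   from transformers import NezhaConfig, NezhaModel
#
#   config = NezhaConfig()
#   model = NezhaModel(config)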
| 220 |
"""simple docstring"""
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class CustomLocalPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin the loop
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,), "This is a local test"

        return ImagePipelineOutput(images=image), "This is a local test"
| 220 | 1 |
"""simple docstring"""
def lowercase__ ( lowercase_ ) -> None:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = generate_pascal_triangle(lowercase_ )
for row_idx in range(lowercase_ ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=" " )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] ,end=" " )
else:
print(triangle[row_idx][col_idx] ,end="" )
print()
def lowercase__ ( lowercase_ ) -> list[list[int]]:
"""simple docstring"""
if not isinstance(lowercase_ ,lowercase_ ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
_UpperCamelCase : list[list[int]] = []
for current_row_idx in range(lowercase_ ):
_UpperCamelCase : Any = populate_current_row(lowercase_ ,lowercase_ )
triangle.append(lowercase_ )
return triangle
def lowercase__ ( lowercase_ ,lowercase_ ) -> list[int]:
"""simple docstring"""
_UpperCamelCase : str = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
_UpperCamelCase : Union[str, Any] = 1, 1
for current_col_idx in range(1 ,lowercase_ ):
calculate_current_element(
lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ )
return current_row
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,) -> None:
"""simple docstring"""
_UpperCamelCase : Tuple = triangle[current_row_idx - 1][current_col_idx - 1]
_UpperCamelCase : Union[str, Any] = triangle[current_row_idx - 1][current_col_idx]
_UpperCamelCase : str = above_to_left_elt + above_to_right_elt
def lowercase__ ( lowercase_ ) -> list[list[int]]:
"""simple docstring"""
if not isinstance(lowercase_ ,lowercase_ ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
_UpperCamelCase : list[list[int]] = [[1]]
for row_index in range(1 ,lowercase_ ):
_UpperCamelCase : Tuple = [0] + result[-1] + [0]
_UpperCamelCase : Union[str, Any] = row_index + 1
# Calculate the number of distinct elements in a row
_UpperCamelCase : List[Any] = sum(divmod(lowercase_ ,2 ) )
_UpperCamelCase : Optional[int] = [
temp_row[i - 1] + temp_row[i] for i in range(1 ,distinct_elements + 1 )
]
_UpperCamelCase : Any = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
_UpperCamelCase : int = row_first_half + row_second_half
result.append(lowercase_ )
return result
def lowercase__ ( ) -> None:
"""simple docstring"""
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(lowercase_ ,lowercase_ ) -> None:
_UpperCamelCase : int = F'''{func.__name__}({value})'''
_UpperCamelCase : Optional[int] = timeit(F'''__main__.{call}''' ,setup="import __main__" )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(F'''{call:38} -- {timing:.4f} seconds''' )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(lowercase_ ,lowercase_ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
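
# For reference, generate_pascal_triangle(5) and the optimized variant both
# return:
#
#   [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]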
| 355 |
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. ``s3://``) from a dataset path."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Whether ``fs`` points to a remote location rather than the local disk."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Clear the reference to fsspec's event loop and thread so forked processes start clean."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
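

# Usage sketch (illustrative; the bucket name is hypothetical):
#
#   import fsspec
#
#   fs, _, _ = fsspec.get_fs_token_paths("s3://my-bucket/my-dataset")
#   is_remote_filesystem(fs)                            # True
#   extract_path_from_uri("s3://my-bucket/my-dataset")  # "my-bucket/my-dataset"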
| 310 | 0 |
"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Rel position bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]

    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_t5x_to_pytorch(
    variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False
):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                    old, i, "decoder"
                ).T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
)
parser.add_argument(
'--scalable_attention',
action='store_true',
help='Whether the model uses scaled attention (umt5 model)',
default=False,
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
| 84 | """simple docstring"""
from __future__ import annotations
def mean(nums: list) -> float:
    """
    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    """
    if not nums:
        raise ValueError('List is empty')
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 213 | 0 |
"""simple docstring"""
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
| 358 |
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        trimmed = tensor[:sequence_length]
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(trimmed), :2] = trimmed
            else:
                out_tensor[i, : len(trimmed)] = trimmed
        else:
            # left padding: place the values at the end of the row
            offset = sequence_length - len(trimmed)
            if isinstance(padding_value, tuple):
                out_tensor[i, offset:, :2] = trimmed
            else:
                out_tensor[i, offset:] = trimmed

    return out_tensor.tolist()


def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False


@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
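

# Usage sketch (illustrative; `features` stands for the preprocessed examples
# produced earlier in the training script):
#
#   from transformers import LukeTokenizer
#
#   tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base")
#   collator = DataCollatorForLukeTokenClassification(tokenizer=tokenizer)
#   batch = collator(features)  # entity labels and spans padded per batch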
| 132 | 0 |
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
    GPT2Config,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
    T5Config,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
    TFGPT2LMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
    TFT5ForConditionalGeneration,
TFTransfoXLLMHeadModel,
    TFWav2Vec2Model,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
    Wav2Vec2Config,
    Wav2Vec2Model,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
    load_pytorch_checkpoint_in_tf2_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
        GPT2LMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
        T5ForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
"bart": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"bert": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-base-cased-finetuned-mrpc": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"dpr": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"gpt2": (
        GPT2Config,
        TFGPT2LMHeadModel,
        GPT2LMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlnet": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm-roberta": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"transfo-xl": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"openai-gpt": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"roberta": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"layoutlm": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"roberta-large-mnli": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"camembert": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"flaubert": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert-base-distilled-squad": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert-visual-feature-encoder": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"ctrl": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"albert": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"t5": (
        T5Config,
        TFT5ForConditionalGeneration,
        T5ForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"electra": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"wav2vec2": (
        Wav2Vec2Config,
        TFWav2Vec2Model,
        Wav2Vec2Model,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    if model_type not in MODEL_CLASSES:
        raise ValueError(f'Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.')

    # (config class, TF model class, PT model class, archive map)
    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f'Building TensorFlow model from configuration: {config}')
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )

    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="""cpu""" )
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f'Max absolute difference between models outputs {diff}')
        assert diff <= 2E-2, f'Error, model absolute difference is >2e-2: {diff}'

    # Save pytorch-model
    print(f'Save TensorFlow model to {tf_dump_path}')
    tf_model.save_weights(tf_dump_path, save_format="""h5""" )
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("""=""" * 100 )
        print(f' Converting model type {j}/{len(model_types)}: {model_type}' )
        print("""=""" * 100 )
        if model_type not in MODEL_CLASSES:
            raise ValueError(f'Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.')

        config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]
        # best-effort reconstruction: the archive map also serves as the list of
        # shortcut names for this model type
        aws_model_maps = aws_config_map

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("""-""" * 100 )
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f' Skipping finetuned checkpoint {model_shortcut_name}' )
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f' Skipping not finetuned checkpoint {model_shortcut_name}' )
                continue
            print(
                f' Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}' )
            print("""-""" * 100 )

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + """-tf_model.h5""" ),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
)
parser.add_argument(
"--model_type",
default=None,
type=str,
help=(
f"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
"convert all the models from AWS."
),
)
parser.add_argument(
"--pytorch_checkpoint_path",
default=None,
type=str,
help=(
"Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
"If not given, will download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--config_file",
default=None,
type=str,
help=(
"The config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture. If not given and "
"--pytorch_checkpoint_path is not given or is a shortcut name "
"use the configuration associated to the shortcut name on the AWS"
),
)
parser.add_argument(
"--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
)
parser.add_argument(
"--use_cached_models",
action="store_true",
help="Use cached models if possible instead of updating to latest checkpoint versions.",
)
parser.add_argument(
"--remove_cached_files",
action="store_true",
help="Remove pytorch models after conversion (save memory when converting in batches).",
)
parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
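# A minimal usage sketch, not part of the original script; the checkpoint paths below
# are placeholders, not real files:
#   python convert_pytorch_checkpoint_to_tf2.py --model_type wav2vec2 \
#       --pytorch_checkpoint_path ./pt_ckpt/pytorch_model.bin \
#       --config_file ./pt_ckpt/config.json \
#       --tf_dump_path ./tf_dump --compare_with_pt_model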
| 252 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_aa: bytes) -> bytes:
    '''simple docstring'''
    if len(string_aa) != 32:
        raise ValueError("Input must be of length 32")
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    '''simple docstring'''
    if i < 0:
        raise ValueError("Input must be non-negative")
    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    '''simple docstring'''
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string
def get_block_words(bit_string: bytes):
    '''simple docstring'''
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_aa(i: int) -> int:
    '''simple docstring'''
    if i < 0:
        raise ValueError("Input must be non-negative")
    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_aa(a: int, b: int) -> int:
    '''simple docstring'''
    return (a + b) % 2**32
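# Worked example for the 32-bit modular add above: sum_aa(0xFFFFFFFF, 1) wraps to 0,
# since (0xFFFFFFFF + 1) % 2**32 == 0. This keeps every intermediate MD5 state in 32 bits.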
def left_rotate_aa(i: int, shift: int) -> int:
    '''simple docstring'''
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    '''simple docstring'''
    bit_string = preprocess(message)
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]
    # Starting states
    aa = 0x67452301
    ba = 0xEFCDAB89
    ca = 0x98BADCFE
    da = 0x10325476
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
# Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = aa
        b = ba
        c = ca
        d = da
        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_aa(b) & d)     # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_aa(d) & c)     # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b, left_rotate_aa(f, shift_amounts[i]))
        # Add hashed chunk to running total
        aa = sum_aa(aa, a)
        ba = sum_aa(ba, b)
        ca = sum_aa(ca, c)
        da = sum_aa(da, d)
    digest = reformat_hex(aa) + reformat_hex(ba) + reformat_hex(ca) + reformat_hex(da)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
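# A small usage sketch, mirroring the doctest style above: md5_me(b"") should produce
# the well-known empty-input MD5 digest b"d41d8cd98f00b204e9800998ecf8427e".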
| 311 | 0 |
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
__magic_name__ = logging.get_logger(__name__)
class RagTokenizer:
    """simple docstring"""

    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer")
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer")
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
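# A minimal usage sketch (assumed; "facebook/rag-token-nq" is the usual RAG checkpoint id):
#     tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#     inputs = tokenizer("who wrote hamlet?", return_tensors="pt")
#     answers = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
# `generated_ids` here stands for the output of a RAG model's generate() call.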
| 356 |
"""simple docstring"""
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
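# A usage sketch with assumed file names: computes ROUGE between line-aligned prediction
# and reference files and optionally saves the metrics as JSON.
#     metrics = calculate_rouge_path("preds.txt", "refs.txt", save_path="rouge.json")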
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 255 | 0 |
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend('''aer_simulator''')
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
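# Since both qubits are flipped with X before measurement, every shot collapses to |11>,
# so the returned histogram is expected to be {'11': 1000} for the 1000 shots above.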
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f'Total count for various states are: {counts}')
| 270 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)
OPTS = None
def parse_args():
    parser = argparse.ArgumentParser('''Official evaluation script for SQuAD version 2.0.''')
    parser.add_argument('''data_file''', metavar='''data.json''', help='''Input data JSON file.''')
    parser.add_argument('''pred_file''', metavar='''pred.json''', help='''Model predictions.''')
    parser.add_argument(
        '''--out-file''', '''-o''', metavar='''eval.json''', help='''Write accuracy metrics to file (default is stdout).''')
    parser.add_argument(
        '''--na-prob-file''', '''-n''', metavar='''na_prob.json''', help='''Model estimates of probability of no answer.''')
    parser.add_argument(
        '''--na-prob-thresh''', '''-t''', type=float, default=1.0, help='''Predict "" if no-answer probability exceeds this (default = 1.0).''', )
    parser.add_argument(
        '''--out-image-dir''', '''-p''', metavar='''out_images''', default=None, help='''Save precision-recall curves to directory.''')
    parser.add_argument('''--verbose''', '''-v''', action='''store_true''')
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa['''answers''']['''text'''])
    return qid_to_has_ans
def normalize_answer(s):
    def remove_articles(text):
        return ARTICLES_REGEX.sub(''' ''', text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_fa(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
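# Worked example for the token-level F1 above (avoiding articles, which normalization
# strips): gold "brown cat sat", prediction "brown cat". Overlap = 2 tokens, so
# precision = 2/2 = 1.0, recall = 2/3, and F1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8.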
def get_raw_scores(dataset, preds):
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa['''id''']
                gold_answers = [t for t in qa['''answers''']['''text'''] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = ['''''']
                if qid not in preds:
                    print(f'''Missing prediction for {qid}''')
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                fa_scores[qid] = max(compute_fa(a, a_pred) for a in gold_answers)
    return exact_scores, fa_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, fa_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ('''exact''', 100.0 * sum(exact_scores.values()) / total),
                ('''f1''', 100.0 * sum(fa_scores.values()) / total),
                ('''total''', total),
            ])
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ('''exact''', 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ('''f1''', 100.0 * sum(fa_scores[k] for k in qid_list) / total),
                ('''total''', total),
            ])


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f'{prefix}_{k}'] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color='''b''', alpha=0.2, where='''post''')
    plt.fill_between(recalls, precisions, step='''post''', alpha=0.2, color='''b''')
    plt.xlabel('''Recall''')
    plt.ylabel('''Precision''')
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, '''pr_exact.png'''), title='''Precision-Recall curve for Exact Match score''', )
    pr_fa = make_precision_recall_eval(
        fa_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, '''pr_f1.png'''), title='''Precision-Recall curve for F1 score''', )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, '''pr_oracle.png'''), title='''Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)''', )
    merge_eval(main_eval, pr_exact, '''pr_exact''')
    merge_eval(main_eval, pr_fa, '''pr_f1''')
    merge_eval(main_eval, pr_oracle, '''pr_oracle''')
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel('''Model probability of no-answer''')
    plt.ylabel('''Proportion of dataset''')
    plt.title(f'''Histogram of no-answer probability: {name}''')
    plt.savefig(os.path.join(image_dir, f'''na_prob_hist_{name}.png'''))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
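# Intuition for the sweep above: qids are visited in order of increasing no-answer
# probability; `cur_score` tracks the score obtained if every question past the current
# point were answered "no answer", and the threshold with the best running score wins.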
def find_all_best_thresh(main_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_fa, fa_thresh = find_best_thresh(preds, fa_raw, na_probs, qid_to_has_ans)
    main_eval['''best_exact'''] = best_exact
    main_eval['''best_exact_thresh'''] = exact_thresh
    main_eval['''best_f1'''] = best_fa
    main_eval['''best_f1_thresh'''] = fa_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json['''data''']
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, fa_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    fa_thresh = apply_no_ans_threshold(fa_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, fa_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, '''HasAns''')
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, '''NoAns''')
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, '''hasAns''')
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, '''noAns''')
    if OPTS.out_file:
        with open(OPTS.out_file, '''w''') as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
main()
| 270 | 1 |
'''simple docstring'''
from typing import Any
def mode(input_list: list) -> list[Any]:
    '''simple docstring'''
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
if __name__ == "__main__":
import doctest
doctest.testmod()
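# A small usage sketch: mode([2, 3, 4, 5, 3, 4]) returns [3, 4], because both values
# appear twice (the maximum count) and tied modes are returned in sorted order.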
| 311 |
'''simple docstring'''
from __future__ import annotations
from random import random
class Node:
    def __init__(self, value: int | None = None) -> None:
        """simple docstring"""
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        """simple docstring"""
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat(
                {f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self) -> str:
        """simple docstring"""
        value = str(self.value) + ''' '''
        left = str(self.left or '''''')
        right = str(self.right or '''''')
        return value + left + right
def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    '''simple docstring'''
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right
def merge(left: Node | None, right: Node | None) -> Node | None:
    '''simple docstring'''
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right
def insert(root: Node | None, value: int) -> Node | None:
    '''simple docstring'''
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)
def erase(root: Node | None, value: int) -> Node | None:
    '''simple docstring'''
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)
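# A small usage sketch: building a treap and removing a key.
#     root = None
#     for value in [5, 3, 8]:
#         root = insert(root, value)
#     root = erase(root, 3)
#     inorder(root)  # prints: 5,8,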
def inorder(root: Node | None) -> None:
    '''simple docstring'''
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=''',''')
        inorder(root.right)
def interact_treap(root: Node | None, args: str) -> Node | None:
    '''simple docstring'''
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print('''Unknown command''')
    return root
def main() -> None:
    '''simple docstring'''
    root = None
    print(
        '''enter numbers to create a tree, + value to add value into treap, '''
        '''- value to erase all nodes with value. \'q\' to quit. ''')
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print('''good by!''')
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 311 | 1 |
'''simple docstring'''
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size
        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)
        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector
        return latent_states
class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ])

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)
        return hidden_states
| 239 | '''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    model_type = '''altclip_text_model'''

    def __init__(
        self,
        vocab_size=250002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig(PretrainedConfig):
    model_type = '''altclip_vision_model'''

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
        return cls.from_dict(config_dict, **kwargs)
class AltCLIPConfig(PretrainedConfig):
    model_type = '''altclip'''
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)
        super().__init__(**kwargs)
        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}
            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()
            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. '''
                            f'''The value `text_config_dict["{key}"]` will be used instead.'''
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '''
                            f'''value `text_config["{key}"]` will be overriden.'''
                        )
                    logger.warning(message)
            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)
        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}
            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["""id2label"""].items()
                }
            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different '''
                            f'''values. The value `vision_config_dict["{key}"]` will be used instead.'''
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '''
                            f'''The value `vision_config["{key}"]` will be overriden.'''
                        )
                    logger.warning(message)
            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)
        if text_config is None:
            text_config = {}
            logger.info("""`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.""")
        if vision_config is None:
            vision_config = {}
            logger.info("""`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.""")
        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
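# A minimal usage sketch: composing a full AltCLIP config from its two halves.
#     text_config = AltCLIPTextConfig()
#     vision_config = AltCLIPVisionConfig()
#     config = AltCLIPConfig.from_text_vision_configs(text_config, vision_config)
#     config_dict = config.to_dict()  # nested text/vision dicts plus model_type "altclip"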
| 239 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['word_shape_file'])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['word_pronunciation_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
        with open(self.word_shape_file, 'w', encoding='utf-8') as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, 'w', encoding='utf-8') as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)
        tokens = tokenizer.tokenize('你好[SEP]你是谁')
        self.assertListEqual(tokens, ['你', '好', '[SEP]', '你', '是', '谁'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])

    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz'), ['ah', '\u535A', '\u63A8', 'zz'])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? '), ['hello', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['hällo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['h\u00E9llo'])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['hallo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['hallo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? '), ['HeLLo', '!', 'how', 'Are', 'yoU', '?'])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['HäLLo', '!', 'how', 'Are', 'yoU', '?'])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['HaLLo', '!', 'how', 'Are', 'yoU', '?'])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=['[UNK]'])
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]'), ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'])

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token='[UNK]')
        self.assertListEqual(tokenizer.tokenize(''), [])
        self.assertListEqual(tokenizer.tokenize('unwanted running'), ['un', '##want', '##ed', 'runn', '##ing'])
        self.assertListEqual(tokenizer.tokenize('unwantedX running'), ['[UNK]', 'runn', '##ing'])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(' '))
        self.assertTrue(_is_whitespace('\t'))
        self.assertTrue(_is_whitespace('\r'))
        self.assertTrue(_is_whitespace('\n'))
        self.assertTrue(_is_whitespace('\u00A0'))
        self.assertFalse(_is_whitespace('A'))
        self.assertFalse(_is_whitespace('-'))

    def test_is_control(self):
        self.assertTrue(_is_control('\u0005'))
        self.assertFalse(_is_control('A'))
        self.assertFalse(_is_control(' '))
        self.assertFalse(_is_control('\t'))
        self.assertFalse(_is_control('\r'))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation('-'))
        self.assertTrue(_is_punctuation('$'))
        self.assertTrue(_is_punctuation('`'))
        self.assertTrue(_is_punctuation('.'))
        self.assertFalse(_is_punctuation('A'))
        self.assertFalse(_is_punctuation(' '))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])
        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence, return_attention_mask=False, return_token_type_ids=False, return_offsets_mapping=True, add_special_tokens=True, )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, 'do_lower_case') else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), 'A'),
                        ((1, 2), ','),
                        ((3, 5), 'na'),
                        ((5, 6), '##ï'),
                        ((6, 8), '##ve'),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), 'Allen'),
                        ((21, 23), '##NL'),
                        ((23, 24), '##P'),
                        ((25, 33), 'sentence'),
                        ((33, 34), '.'),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), 'a'),
                        ((1, 2), ','),
                        ((3, 8), 'naive'),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), 'allen'),
                        ((21, 23), '##nl'),
                        ((23, 24), '##p'),
                        ((25, 33), 'sentence'),
                        ((33, 34), '.'),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )
                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens['input_ids']))
                self.assertEqual([e[0] for e in expected_results], tokens['offset_mapping'])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ['的', '人', '有']
        text_with_chinese_char = ''.join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs['tokenize_chinese_chars'] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs['tokenize_chinese_chars'] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)
        text = tokenizer.encode('你好', add_special_tokens=False)
        text_a = tokenizer.encode('你是谁', add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_a + [2]

    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = '你好,你是谁'
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True)
                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)
                self.assertEqual(input_dict, prepared_input_dict)
| 246 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCamelCase_ = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}
if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 246 | 1 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    '''simple docstring'''
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("""feature_extractor""")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""")
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""")
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                """You cannot provide bounding boxes """
                """if you initialized the image processor with apply_ocr set to True.""")
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                """You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""")
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("""You cannot return overflowing tokens without returning the offsets mapping.""")
        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["""words"""]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["""words"""], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["""boxes"""], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        # add pixel values
        images = features.pop("""pixel_values""")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["""overflow_to_sample_mapping"""])
        encoded_inputs["""image"""] = images
        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                """Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}")
        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""",
            FutureWarning,
        )
        return self.image_processor
| 64 |
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
):
    """simple docstring"""
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
        frequencies = {
'''a''': 0.08497,
'''b''': 0.01492,
'''c''': 0.02202,
'''d''': 0.04253,
'''e''': 0.11162,
'''f''': 0.02228,
'''g''': 0.02015,
'''h''': 0.06094,
'''i''': 0.07546,
'''j''': 0.00153,
'''k''': 0.01292,
'''l''': 0.04025,
'''m''': 0.02406,
'''n''': 0.06749,
'''o''': 0.07507,
'''p''': 0.01929,
'''q''': 0.00095,
'''r''': 0.07587,
'''s''': 0.06327,
'''t''': 0.09356,
'''u''': 0.02758,
'''v''': 0.00978,
'''w''': 0.02560,
'''x''': 0.00150,
'''y''': 0.01994,
'''z''': 0.00077,
}
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict
    if not case_sensitive:
        ciphertext = ciphertext.lower()
    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}
    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""
        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(alphabet_letters)
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter
        chi_squared_statistic = 0.0
        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]
    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )
    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
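# A quick demonstration, mirroring the upstream doctest for this solver
# (shift 7 minimizes the chi-squared statistic for this message):
if __name__ == "__main__":
    decoded = decrypt_caesar_with_chi_squared(
        "dof pz aol jhlzhy jpwoly zv wvwbshy? pa pz avv lhzf av jyhjr!"
    )
    print(decoded)  # (7, ..., 'why is the caesar cipher so popular? it is too easy to crack!')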
| 94 | 0 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
snake_case__ : List[Any] = logging.get_logger(__name__)
snake_case__ : Any = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
snake_case__ : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def _lowerCamelCase ( lowerCamelCase_ : str ):
"""simple docstring"""
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
UpperCAmelCase_ : int = model_type_to_module_name(lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = importlib.import_module(F'''.{module_name}''' , 'transformers.models' )
try:
return getattr(lowerCamelCase_ , lowerCamelCase_ )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(lowerCamelCase_ , '__name__' , lowerCamelCase_ ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
UpperCAmelCase_ : Tuple = importlib.import_module('transformers' )
if hasattr(lowerCamelCase_ , lowerCamelCase_ ):
return getattr(lowerCamelCase_ , lowerCamelCase_ )
return None
def _lowerCamelCase ( lowerCamelCase_ : Union[str, os.PathLike] , lowerCamelCase_ : Optional[Union[str, os.PathLike]] = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[Dict[str, str]] = None , lowerCamelCase_ : Optional[Union[bool, str]] = None , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : bool = False , **lowerCamelCase_ : str , ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = get_file_from_repo(
lowerCamelCase_ , lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , resume_download=lowerCamelCase_ , proxies=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , revision=lowerCamelCase_ , local_files_only=lowerCamelCase_ , )
if resolved_config_file is None:
logger.info(
'Could not locate the image processor configuration file, will try to use the model config instead.' )
return {}
with open(lowerCamelCase_ , encoding='utf-8' ) as reader:
return json.load(lowerCamelCase_ )
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self ):
'''simple docstring'''
raise EnvironmentError(
'AutoImageProcessor is designed to be instantiated '
'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
@replace_list_option_in_docstrings(snake_case_ )
def _UpperCamelCase ( cls , snake_case_ , **snake_case_ ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = kwargs.pop('config' , snake_case_ )
UpperCAmelCase_ : List[str] = kwargs.pop('trust_remote_code' , snake_case_ )
UpperCAmelCase_ : Optional[int] = True
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = ImageProcessingMixin.get_image_processor_dict(snake_case_ , **snake_case_ )
UpperCAmelCase_ : Optional[Any] = config_dict.get('image_processor_type' , snake_case_ )
UpperCAmelCase_ : int = None
if "AutoImageProcessor" in config_dict.get('auto_map' , {} ):
UpperCAmelCase_ : Dict = config_dict['auto_map']['AutoImageProcessor']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
UpperCAmelCase_ : Any = config_dict.pop('feature_extractor_type' , snake_case_ )
if feature_extractor_class is not None:
logger.warning(
'Could not find image processor class in the image processor config or the model config. Loading'
' based on pattern matching with the model\'s feature extractor configuration.' )
UpperCAmelCase_ : str = feature_extractor_class.replace('FeatureExtractor' , 'ImageProcessor' )
if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ):
UpperCAmelCase_ : Union[str, Any] = config_dict['auto_map']['AutoFeatureExtractor']
UpperCAmelCase_ : Union[str, Any] = feature_extractor_auto_map.replace('FeatureExtractor' , 'ImageProcessor' )
logger.warning(
'Could not find image processor auto map in the image processor config or the model config.'
' Loading based on pattern matching with the model\'s feature extractor configuration.' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(snake_case_ , snake_case_ ):
UpperCAmelCase_ : Tuple = AutoConfig.from_pretrained(snake_case_ , **snake_case_ )
            # It could be in `config.image_processor_type`
UpperCAmelCase_ : Any = getattr(snake_case_ , 'image_processor_type' , snake_case_ )
if hasattr(snake_case_ , 'auto_map' ) and "AutoImageProcessor" in config.auto_map:
UpperCAmelCase_ : Tuple = config.auto_map['AutoImageProcessor']
if image_processor_class is not None:
UpperCAmelCase_ : List[str] = image_processor_class_from_name(snake_case_ )
UpperCAmelCase_ : Tuple = image_processor_auto_map is not None
UpperCAmelCase_ : Optional[int] = image_processor_class is not None or type(snake_case_ ) in IMAGE_PROCESSOR_MAPPING
UpperCAmelCase_ : List[Any] = resolve_trust_remote_code(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
if has_remote_code and trust_remote_code:
UpperCAmelCase_ : List[str] = get_class_from_dynamic_module(
snake_case_ , snake_case_ , **snake_case_ )
UpperCAmelCase_ : Union[str, Any] = kwargs.pop('code_revision' , snake_case_ )
if os.path.isdir(snake_case_ ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(snake_case_ , **snake_case_ )
elif image_processor_class is not None:
return image_processor_class.from_dict(snake_case_ , **snake_case_ )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(snake_case_ ) in IMAGE_PROCESSOR_MAPPING:
UpperCAmelCase_ : List[Any] = IMAGE_PROCESSOR_MAPPING[type(snake_case_ )]
return image_processor_class.from_dict(snake_case_ , **snake_case_ )
raise ValueError(
F'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
F'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
F'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def _UpperCamelCase ( snake_case_ , snake_case_ ):
'''simple docstring'''
IMAGE_PROCESSOR_MAPPING.register(snake_case_ , snake_case_ )
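# Minimal usage sketch (this auto class ships in transformers as
# AutoImageProcessor; the checkpoint name is illustrative):
#
#   from transformers import AutoImageProcessor
#
#   image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   type(image_processor).__name__  # 'ViTImageProcessor', per the "vit" entry above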
| 274 |
'''simple docstring'''
def nor_gate(input_1: int, input_2: int) -> int:
    """simple docstring"""
    return int(input_1 == input_2 == 0)
def main() -> None:
    """simple docstring"""
print('Truth Table of NOR Gate:' )
print('| Input 1 | Input 2 | Output |' )
print(F'''| 0 | 0 | {nor_gate(0 , 0 )} |''' )
print(F'''| 0 | 1 | {nor_gate(0 , 1 )} |''' )
print(F'''| 1 | 0 | {nor_gate(1 , 0 )} |''' )
print(F'''| 1 | 1 | {nor_gate(1 , 1 )} |''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
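# NOR is functionally complete: NOT (and from it AND and OR) can be built
# from NOR alone. A one-line illustration using the gate above:
def not_gate(input_1: int) -> int:
    return nor_gate(input_1, input_1)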
| 274 | 1 |
cache: dict[tuple[int, int, int], int] = {}
def _calculate(days: int, absent: int, late: int) -> int:
    """simple docstring"""
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings
def solution(days: int = 30) -> int:
    """simple docstring"""
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
print(solution())
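    # sanity check: with these rules a 4-day period admits exactly 43 prize
    # strings (the count quoted in Project Euler problem 191)
    assert solution(4) == 43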
| 302 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )
    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs
    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)
    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs
    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()
            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )
            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold
            ]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]
            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.idalabel[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]
        return annotation
    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
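# Minimal usage sketch via the pipeline factory (the checkpoint and image URL
# are the standard transformers documentation examples, shown for illustration):
if __name__ == "__main__":
    from transformers import pipeline

    detector = pipeline("object-detection", model="facebook/detr-resnet-50")
    for prediction in detector("http://images.cocodataset.org/val2017/000000039769.jpg"):
        print(prediction["label"], prediction["score"], prediction["box"])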
| 302 | 1 |
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}
    ret: set[int] = set()
    prime: int
    sub: int
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)
    return ret
def solution(number_unique_partitions: int = 5000) -> int | None:
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(f'''{solution() = }''')
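    # illustration of the product trick used above: a multiset of primes is
    # uniquely recovered from its product, and the prime partitions of 7 are
    # 7, 2 + 5 and 2 + 2 + 3, so:
    assert partition(7) == {7, 10, 12}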
| 355 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
_snake_case : int = "scheduler_config.json"
class a (_lowerCAmelCase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = 1
__UpperCAmelCase : Tuple = 2
__UpperCAmelCase : Union[str, Any] = 3
__UpperCAmelCase : List[Any] = 4
__UpperCAmelCase : Tuple = 5
@dataclass
class a (_lowerCAmelCase ):
"""simple docstring"""
__UpperCAmelCase : jnp.ndarray
class a :
"""simple docstring"""
__UpperCAmelCase : Dict = SCHEDULER_CONFIG_NAME
__UpperCAmelCase : Union[str, Any] = ["dtype"]
__UpperCAmelCase : Tuple = []
__UpperCAmelCase : int = True
@classmethod
def __snake_case ( cls : List[str] , lowerCamelCase : Dict[str, Any] = None , lowerCamelCase : Optional[str] = None , lowerCamelCase : List[str]=False , **lowerCamelCase : Union[str, Any] , ) -> List[str]:
__snake_case , __snake_case : List[str] = cls.load_config(
pretrained_model_name_or_path=lowerCamelCase , subfolder=lowerCamelCase , return_unused_kwargs=lowerCamelCase , **lowerCamelCase , )
__snake_case , __snake_case : Dict = cls.from_config(lowerCamelCase , return_unused_kwargs=lowerCamelCase , **lowerCamelCase )
if hasattr(lowerCamelCase , "create_state" ) and getattr(lowerCamelCase , "has_state" , lowerCamelCase ):
__snake_case : Tuple = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def __snake_case ( self : Any , lowerCamelCase : Union[str, os.PathLike] , lowerCamelCase : bool = False , **lowerCamelCase : List[Any] ) -> int:
self.save_config(save_directory=lowerCamelCase , push_to_hub=lowerCamelCase , **lowerCamelCase )
@property
def __snake_case ( self : Tuple ) -> List[Any]:
return self._get_compatibles()
@classmethod
def __snake_case ( cls : int ) -> Dict:
__snake_case : Tuple = list(set([cls.__name__] + cls._compatibles ) )
__snake_case : int = importlib.import_module(__name__.split("." )[0] )
__snake_case : Tuple = [
getattr(lowerCamelCase , lowerCamelCase ) for c in compatible_classes_str if hasattr(lowerCamelCase , lowerCamelCase )
]
return compatible_classes
def broadcast_to_shape_from_left(x, shape):
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32):
    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
@flax.struct.dataclass
class a :
"""simple docstring"""
__UpperCAmelCase : jnp.ndarray
__UpperCAmelCase : jnp.ndarray
__UpperCAmelCase : jnp.ndarray
@classmethod
def __snake_case ( cls : Union[str, Any] , lowerCamelCase : int ) -> List[Any]:
__snake_case : Dict = scheduler.config
if config.trained_betas is not None:
__snake_case : Dict = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
__snake_case : Optional[int] = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__snake_case : str = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__snake_case : Optional[Any] = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
F'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}' )
__snake_case : Any = 1.0 - betas
__snake_case : int = jnp.cumprod(lowerCamelCase , axis=0 )
return cls(
alphas=lowerCamelCase , betas=lowerCamelCase , alphas_cumprod=lowerCamelCase , )
def get_sqrt_alpha_prod(state, original_samples, noise, timesteps):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common(state, original_samples, noise, timesteps):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def get_velocity_common(state, sample, noise, timesteps):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
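# Tiny smoke test for the helpers above (pure jax.numpy; no scheduler state is
# needed for betas_for_alpha_bar itself):
if __name__ == "__main__":
    betas = betas_for_alpha_bar(10)
    alphas_cumprod = jnp.cumprod(1.0 - betas, axis=0)
    print(betas.shape, float(alphas_cumprod[-1]))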
| 134 | 0 |
"""simple docstring"""
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
UpperCAmelCase_ : List[str] = logging.getLogger(__name__)
class lowerCAmelCase__ ( __snake_case ):
'''simple docstring'''
__UpperCamelCase = """summarization"""
__UpperCamelCase = ["""loss"""]
__UpperCamelCase = ROUGE_KEYS
__UpperCamelCase = """rouge2"""
def __init__( self : Tuple , lowercase_ : Union[str, Any] , **lowercase_ : List[str]):
'''simple docstring'''
if hparams.sortish_sampler and hparams.gpus > 1:
SCREAMING_SNAKE_CASE_ : int = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('''Dynamic Batch size does not work for multi-gpu training''')
if hparams.sortish_sampler:
raise ValueError('''--sortish_sampler and --max_tokens_per_batch may not be used simultaneously''')
super().__init__(UpperCamelCase__ , num_labels=UpperCamelCase__ , mode=self.mode , **UpperCamelCase__)
use_task_specific_params(self.model , '''summarization''')
save_git_info(self.hparams.output_dir)
SCREAMING_SNAKE_CASE_ : Optional[int] = Path(self.output_dir) / "metrics.json"
SCREAMING_SNAKE_CASE_ : str = Path(self.output_dir) / "hparams.pkl"
pickle_save(self.hparams , self.hparams_save_path)
SCREAMING_SNAKE_CASE_ : Dict = 0
SCREAMING_SNAKE_CASE_ : Tuple = defaultdict(UpperCamelCase__)
SCREAMING_SNAKE_CASE_ : int = self.config.model_type
SCREAMING_SNAKE_CASE_ : Tuple = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size
SCREAMING_SNAKE_CASE_ : dict = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
SCREAMING_SNAKE_CASE_ : Dict = {
"train": self.hparams.n_train,
"val": self.hparams.n_val,
"test": self.hparams.n_test,
}
SCREAMING_SNAKE_CASE_ : Dict = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
SCREAMING_SNAKE_CASE_ : Optional[Any] = {
"train": self.hparams.max_target_length,
"val": self.hparams.val_max_target_length,
"test": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F'target_lens: {self.target_lens}'
assert self.target_lens["train"] <= self.target_lens["test"], F'target_lens: {self.target_lens}'
if self.hparams.freeze_embeds:
freeze_embeds(self.model)
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder())
assert_all_frozen(self.model.get_encoder())
SCREAMING_SNAKE_CASE_ : str = get_git_info()["repo_sha"]
SCREAMING_SNAKE_CASE_ : Optional[Any] = hparams.num_workers
SCREAMING_SNAKE_CASE_ : List[str] = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , UpperCamelCase__):
SCREAMING_SNAKE_CASE_ : Dict = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
SCREAMING_SNAKE_CASE_ : List[Any] = self.decoder_start_token_id
SCREAMING_SNAKE_CASE_ : Optional[Any] = (
SeqaSeqDataset if hasattr(self.tokenizer , '''prepare_seq2seq_batch''') else LegacySeqaSeqDataset
)
SCREAMING_SNAKE_CASE_ : Dict = False
SCREAMING_SNAKE_CASE_ : int = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
SCREAMING_SNAKE_CASE_ : List[Any] = self.hparams.eval_max_gen_length
else:
SCREAMING_SNAKE_CASE_ : Dict = self.model.config.max_length
SCREAMING_SNAKE_CASE_ : List[Any] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : Dict[str, torch.Tensor]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = {
k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
}
save_json(UpperCamelCase__ , Path(self.output_dir) / '''text_batch.json''')
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir) / '''tok_batch.json''')
SCREAMING_SNAKE_CASE_ : Any = True
return readable_batch
def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : str , **lowercase_ : Optional[int]):
'''simple docstring'''
return self.model(UpperCamelCase__ , **UpperCamelCase__)
def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : List[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = self.tokenizer.batch_decode(
UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__)
return lmap(str.strip , UpperCamelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowercase_ : dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = self.tokenizer.pad_token_id
SCREAMING_SNAKE_CASE_ : Union[str, Any] = batch["input_ids"], batch["attention_mask"]
SCREAMING_SNAKE_CASE_ : Optional[Any] = batch["labels"]
if isinstance(self.model , UpperCamelCase__):
SCREAMING_SNAKE_CASE_ : List[Any] = self.model._shift_right(UpperCamelCase__)
else:
SCREAMING_SNAKE_CASE_ : Tuple = shift_tokens_right(UpperCamelCase__ , UpperCamelCase__)
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
SCREAMING_SNAKE_CASE_ : Tuple = decoder_input_ids
self.save_readable_batch(UpperCamelCase__)
SCREAMING_SNAKE_CASE_ : Optional[int] = self(UpperCamelCase__ , attention_mask=UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ , use_cache=UpperCamelCase__)
SCREAMING_SNAKE_CASE_ : str = outputs["logits"]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
SCREAMING_SNAKE_CASE_ : Optional[int] = nn.CrossEntropyLoss(ignore_index=UpperCamelCase__)
assert lm_logits.shape[-1] == self.vocab_size
SCREAMING_SNAKE_CASE_ : Tuple = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1]) , tgt_ids.view(-1))
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = nn.functional.log_softmax(UpperCamelCase__ , dim=-1)
SCREAMING_SNAKE_CASE_ : int = label_smoothed_nll_loss(
UpperCamelCase__ , UpperCamelCase__ , self.hparams.label_smoothing , ignore_index=UpperCamelCase__)
return (loss,)
@property
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
return self.tokenizer.pad_token_id
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : List[Any] , lowercase_ : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = self._step(UpperCamelCase__)
SCREAMING_SNAKE_CASE_ : Any = dict(zip(self.loss_names , UpperCamelCase__))
# tokens per batch
SCREAMING_SNAKE_CASE_ : Any = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
SCREAMING_SNAKE_CASE_ : List[Any] = batch["input_ids"].shape[0]
SCREAMING_SNAKE_CASE_ : Optional[int] = batch["input_ids"].eq(self.pad).sum()
SCREAMING_SNAKE_CASE_ : Any = batch["input_ids"].eq(self.pad).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : Optional[int] , lowercase_ : str):
'''simple docstring'''
return self._generative_step(UpperCamelCase__)
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : Any , lowercase_ : List[str]="val"):
'''simple docstring'''
self.step_count += 1
SCREAMING_SNAKE_CASE_ : int = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
SCREAMING_SNAKE_CASE_ : Tuple = losses["loss"]
SCREAMING_SNAKE_CASE_ : Any = {
k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
}
SCREAMING_SNAKE_CASE_ : Tuple = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
SCREAMING_SNAKE_CASE_ : torch.FloatTensor = torch.tensor(UpperCamelCase__).type_as(UpperCamelCase__)
generative_metrics.update({k: v.item() for k, v in losses.items()})
losses.update(UpperCamelCase__)
SCREAMING_SNAKE_CASE_ : Optional[Any] = {F'{prefix}_avg_{k}': x for k, x in losses.items()}
SCREAMING_SNAKE_CASE_ : Tuple = self.step_count
self.metrics[prefix].append(UpperCamelCase__) # callback writes this to self.metrics_save_path
SCREAMING_SNAKE_CASE_ : Optional[int] = flatten_list([x['''preds'''] for x in outputs])
return {
"log": all_metrics,
"preds": preds,
F'{prefix}_loss': loss,
F'{prefix}_{self.val_metric}': metric_tensor,
}
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : int , lowercase_ : List[Any]):
'''simple docstring'''
return calculate_rouge(UpperCamelCase__ , UpperCamelCase__)
def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
SCREAMING_SNAKE_CASE_ : Dict = self.model.generate(
batch['''input_ids'''] , attention_mask=batch['''attention_mask'''] , use_cache=UpperCamelCase__ , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
SCREAMING_SNAKE_CASE_ : List[Any] = (time.time() - ta) / batch["input_ids"].shape[0]
SCREAMING_SNAKE_CASE_ : List[str] = self.ids_to_clean_text(UpperCamelCase__)
SCREAMING_SNAKE_CASE_ : List[str] = self.ids_to_clean_text(batch['''labels'''])
SCREAMING_SNAKE_CASE_ : Optional[Any] = self._step(UpperCamelCase__)
SCREAMING_SNAKE_CASE_ : int = dict(zip(self.loss_names , UpperCamelCase__))
SCREAMING_SNAKE_CASE_ : Dict = self.calc_generative_metrics(UpperCamelCase__ , UpperCamelCase__)
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.mean(lmap(UpperCamelCase__ , UpperCamelCase__))
base_metrics.update(gen_time=UpperCamelCase__ , gen_len=UpperCamelCase__ , preds=UpperCamelCase__ , target=UpperCamelCase__ , **UpperCamelCase__)
return base_metrics
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : Tuple , lowercase_ : Optional[Any]):
'''simple docstring'''
return self._generative_step(UpperCamelCase__)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : List[str]):
'''simple docstring'''
return self.validation_epoch_end(UpperCamelCase__ , prefix='''test''')
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = self.n_obs[type_path]
SCREAMING_SNAKE_CASE_ : List[str] = self.target_lens[type_path]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.dataset_class(
self.tokenizer , type_path=UpperCamelCase__ , n_obs=UpperCamelCase__ , max_target_length=UpperCamelCase__ , **self.dataset_kwargs , )
return dataset
def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : str , lowercase_ : int , lowercase_ : bool = False):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dataset(UpperCamelCase__)
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
SCREAMING_SNAKE_CASE_ : Optional[Any] = dataset.make_sortish_sampler(UpperCamelCase__ , distributed=self.hparams.gpus > 1)
return DataLoader(
UpperCamelCase__ , batch_size=UpperCamelCase__ , collate_fn=dataset.collate_fn , shuffle=UpperCamelCase__ , num_workers=self.num_workers , sampler=UpperCamelCase__ , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
SCREAMING_SNAKE_CASE_ : List[Any] = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1)
return DataLoader(
UpperCamelCase__ , batch_sampler=UpperCamelCase__ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
UpperCamelCase__ , batch_size=UpperCamelCase__ , collate_fn=dataset.collate_fn , shuffle=UpperCamelCase__ , num_workers=self.num_workers , sampler=UpperCamelCase__ , )
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_dataloader('''train''' , batch_size=self.hparams.train_batch_size , shuffle=UpperCamelCase__)
return dataloader
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
return self.get_dataloader('''val''' , batch_size=self.hparams.eval_batch_size)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
return self.get_dataloader('''test''' , batch_size=self.hparams.eval_batch_size)
@staticmethod
def _SCREAMING_SNAKE_CASE ( lowercase_ : str , lowercase_ : Tuple):
'''simple docstring'''
BaseTransformer.add_model_specific_args(UpperCamelCase__ , UpperCamelCase__)
add_generic_args(UpperCamelCase__ , UpperCamelCase__)
parser.add_argument(
'''--max_source_length''' , default=1024 , type=UpperCamelCase__ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--max_target_length''' , default=56 , type=UpperCamelCase__ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--val_max_target_length''' , default=142 , type=UpperCamelCase__ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--test_max_target_length''' , default=142 , type=UpperCamelCase__ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument('''--freeze_encoder''' , action='''store_true''')
parser.add_argument('''--freeze_embeds''' , action='''store_true''')
parser.add_argument('''--sortish_sampler''' , action='''store_true''' , default=UpperCamelCase__)
parser.add_argument('''--overwrite_output_dir''' , action='''store_true''' , default=UpperCamelCase__)
parser.add_argument('''--max_tokens_per_batch''' , type=UpperCamelCase__ , default=UpperCamelCase__)
parser.add_argument('''--logger_name''' , type=UpperCamelCase__ , choices=['''default''', '''wandb''', '''wandb_shared'''] , default='''default''')
parser.add_argument('''--n_train''' , type=UpperCamelCase__ , default=-1 , required=UpperCamelCase__ , help='''# examples. -1 means use all.''')
parser.add_argument('''--n_val''' , type=UpperCamelCase__ , default=500 , required=UpperCamelCase__ , help='''# examples. -1 means use all.''')
parser.add_argument('''--n_test''' , type=UpperCamelCase__ , default=-1 , required=UpperCamelCase__ , help='''# examples. -1 means use all.''')
parser.add_argument(
'''--task''' , type=UpperCamelCase__ , default='''summarization''' , required=UpperCamelCase__ , help='''# examples. -1 means use all.''')
parser.add_argument('''--label_smoothing''' , type=UpperCamelCase__ , default=0.0 , required=UpperCamelCase__)
parser.add_argument('''--src_lang''' , type=UpperCamelCase__ , default='''''' , required=UpperCamelCase__)
parser.add_argument('''--tgt_lang''' , type=UpperCamelCase__ , default='''''' , required=UpperCamelCase__)
parser.add_argument('''--eval_beams''' , type=UpperCamelCase__ , default=UpperCamelCase__ , required=UpperCamelCase__)
parser.add_argument(
'''--val_metric''' , type=UpperCamelCase__ , default=UpperCamelCase__ , required=UpperCamelCase__ , choices=['''bleu''', '''rouge2''', '''loss''', None])
parser.add_argument('''--eval_max_gen_length''' , type=UpperCamelCase__ , default=UpperCamelCase__ , help='''never generate more than n tokens''')
parser.add_argument('''--save_top_k''' , type=UpperCamelCase__ , default=1 , required=UpperCamelCase__ , help='''How many checkpoints to save''')
parser.add_argument(
'''--early_stopping_patience''' , type=UpperCamelCase__ , default=-1 , required=UpperCamelCase__ , help=(
'''-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'''
''' val_check_interval will effect it.'''
) , )
return parser
class lowerCAmelCase__ ( __snake_case ):
'''simple docstring'''
__UpperCamelCase = """translation"""
__UpperCamelCase = ["""loss"""]
__UpperCamelCase = ["""bleu"""]
__UpperCamelCase = """bleu"""
def __init__( self : Dict , lowercase_ : List[Any] , **lowercase_ : int):
'''simple docstring'''
super().__init__(UpperCamelCase__ , **UpperCamelCase__)
SCREAMING_SNAKE_CASE_ : Optional[Any] = hparams.src_lang
SCREAMING_SNAKE_CASE_ : str = hparams.tgt_lang
def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : List[str] , lowercase_ : Optional[int]):
'''simple docstring'''
return calculate_bleu(UpperCamelCase__ , UpperCamelCase__)
def _A (__a , __a=None ) -> SummarizationModule:
"""simple docstring"""
Path(args.output_dir ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE )
check_output_dir(__SCREAMING_SNAKE_CASE , expected_items=3 )
if model is None:
if "summarization" in args.task:
SCREAMING_SNAKE_CASE_ : SummarizationModule = SummarizationModule(__SCREAMING_SNAKE_CASE )
else:
SCREAMING_SNAKE_CASE_ : SummarizationModule = TranslationModule(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Any = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith('''/tmp''' )
or str(args.output_dir ).startswith('''/var''' )
):
SCREAMING_SNAKE_CASE_ : Optional[int] = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
SCREAMING_SNAKE_CASE_ : str = os.environ.get('''WANDB_PROJECT''' , __SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : int = WandbLogger(name=model.output_dir.name , project=__SCREAMING_SNAKE_CASE )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
SCREAMING_SNAKE_CASE_ : Tuple = WandbLogger(name=model.output_dir.name , project=f'hf_{dataset}' )
if args.early_stopping_patience >= 0:
SCREAMING_SNAKE_CASE_ : Tuple = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : List[str] = args.val_metric == "loss"
SCREAMING_SNAKE_CASE_ : pl.Trainer = generic_train(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , __SCREAMING_SNAKE_CASE ) , early_stopping_callback=__SCREAMING_SNAKE_CASE , logger=__SCREAMING_SNAKE_CASE , )
pickle_save(model.hparams , model.output_dir / '''hparams.pkl''' )
if not args.do_predict:
return model
SCREAMING_SNAKE_CASE_ : Optional[Any] = ""
SCREAMING_SNAKE_CASE_ : Optional[int] = sorted(glob.glob(os.path.join(args.output_dir , '''*.ckpt''' ) , recursive=__SCREAMING_SNAKE_CASE ) )
if checkpoints:
SCREAMING_SNAKE_CASE_ : List[str] = checkpoints[-1]
SCREAMING_SNAKE_CASE_ : List[Any] = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
UpperCAmelCase_ : str = argparse.ArgumentParser()
UpperCAmelCase_ : int = pl.Trainer.add_argparse_args(parser)
UpperCAmelCase_ : Optional[int] = SummarizationModule.add_model_specific_args(parser, os.getcwd())
UpperCAmelCase_ : Any = parser.parse_args()
main(args)
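# A typical invocation, for reference (paths and model name are illustrative;
# --model_name_or_path, --data_dir, --output_dir and --do_predict come from
# add_generic_args in lightning_base, which is not shown here):
#
#   python finetune.py \
#       --model_name_or_path t5-small \
#       --data_dir ./cnn_dm \
#       --output_dir ./outputs \
#       --do_predict \
#       --max_source_length 1024 --max_target_length 56 \
#       --n_val 500 \
#       --val_metric rouge2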
| 91 |
"""simple docstring"""
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpta_checkpoint_to_pytorch(gpta_checkpoint_path, gpta_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file)
    model = GPTaModel(config)

    # Load weights from numpy
    load_tf_weights_in_gpta(model, config, gpta_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--gpt2_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
__A = parser.parse_args()
    convert_gpta_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 217 | 0 |
"""simple docstring"""
from __future__ import annotations
class XORCipher:
    """simple docstring"""
    def __init__(self, key: int = 0):
        self.__key = key
    def encrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]
    def decrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]
    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans
    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans
    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True
    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 2 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
__A = logging.get_logger(__name__)
__A = {
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :str = "bloom"
_UpperCAmelCase :List[str] = ["past_key_values"]
_UpperCAmelCase :Optional[Any] = {
"num_hidden_layers": "n_layer",
"num_attention_heads": "n_head",
}
def __init__( self , _UpperCAmelCase=250880 , _UpperCAmelCase=64 , _UpperCAmelCase=2 , _UpperCAmelCase=8 , _UpperCAmelCase=1e-5 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=False , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1 , _UpperCAmelCase=False , **_UpperCAmelCase , ):
lowercase__: Any = vocab_size
# Backward compatibility with n_embed kwarg
lowercase__: Optional[Any] = kwargs.pop('''n_embed''' , _UpperCAmelCase )
lowercase__: int = hidden_size if n_embed is None else n_embed
lowercase__: int = n_layer
lowercase__: int = n_head
lowercase__: Optional[Any] = layer_norm_epsilon
lowercase__: int = initializer_range
lowercase__: List[Any] = use_cache
lowercase__: str = pretraining_tp
lowercase__: Tuple = apply_residual_connection_post_layernorm
lowercase__: int = hidden_dropout
lowercase__: Optional[Any] = attention_dropout
lowercase__: int = bos_token_id
lowercase__: Union[str, Any] = eos_token_id
lowercase__: Any = slow_but_exact
super().__init__(bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :int = version.parse("1.12" )
def __init__( self , _UpperCAmelCase , _UpperCAmelCase = "default" , _UpperCAmelCase = None , _UpperCAmelCase = False , ):
super().__init__(_UpperCAmelCase , task=_UpperCAmelCase , patching_specs=_UpperCAmelCase , use_past=_UpperCAmelCase )
if not getattr(self._config , '''pad_token_id''' , _UpperCAmelCase ):
# TODO: how to do that better?
lowercase__: Any = 0
@property
def _snake_case ( self ):
lowercase__: str = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(_UpperCAmelCase , direction='''inputs''' , inverted_values_shape=_UpperCAmelCase )
lowercase__: List[str] = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
lowercase__: str = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def _snake_case ( self ):
return self._config.n_layer
@property
def _snake_case ( self ):
return self._config.n_head
@property
def _snake_case ( self ):
return 1e-3
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = -1 , _UpperCAmelCase = -1 , _UpperCAmelCase = False , _UpperCAmelCase = None , ):
lowercase__: str = super(_UpperCAmelCase , self ).generate_dummy_inputs(
_UpperCAmelCase , batch_size=_UpperCAmelCase , seq_length=_UpperCAmelCase , is_pair=_UpperCAmelCase , framework=_UpperCAmelCase )
# We need to order the input in the way they appears in the forward()
lowercase__: List[Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowercase__, lowercase__: Optional[Any] = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
lowercase__: Tuple = seqlen + 2
lowercase__: str = self._config.hidden_size // self.num_attention_heads
lowercase__: Optional[int] = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
lowercase__: Union[str, Any] = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
lowercase__: str = [
(torch.zeros(_UpperCAmelCase ), torch.zeros(_UpperCAmelCase )) for _ in range(self.num_layers )
]
lowercase__: Tuple = common_inputs['''attention_mask''']
if self.use_past:
lowercase__: int = ordered_inputs['''attention_mask'''].dtype
lowercase__: List[str] = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(_UpperCAmelCase , _UpperCAmelCase , dtype=_UpperCAmelCase )] , dim=1 )
return ordered_inputs
@property
def _snake_case ( self ):
return 13
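# Tiny illustration of the attribute_map declared above (assuming this class
# ships as transformers' BloomConfig): num_hidden_layers resolves to n_layer
# and num_attention_heads to n_head.
#
#   from transformers import BloomConfig
#
#   config = BloomConfig(n_layer=2, n_head=8)
#   assert config.num_hidden_layers == 2 and config.num_attention_heads == 8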
| 2 | 1 |
import numpy as np
def tanh(vector: np.ndarray) -> np.ndarray:
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
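    # quick numeric check: tanh(+/-1) is approximately +/-0.7615941559557649
    print(tanh(np.array([-1.0, 0.0, 1.0])))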
| 193 |
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order
def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component
def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
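# A quick demonstration on the sample graphs (Kosaraju's two passes; the
# expected outputs below follow from tracing the DFS order):
if __name__ == "__main__":
    print(strongly_connected_components(test_graph_1))  # [[0, 1, 2], [3], [4]]
    print(strongly_connected_components(test_graph_2))  # [[0, 2, 1], [3, 5, 4]]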
| 193 | 1 |
import argparse
import collections
import os
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."


def _find_text_in_file(filename, start_prompt, end_prompt):
    # Find the block of text between `start_prompt` and `end_prompt`, returning it with its start/end indices.
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# Add here suffixes that are used to identify models, separated by |
MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
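

# camel_case_split (below) breaks a CamelCase identifier into its words, e.g.
# "TFDistilBertModel" -> ["TF", "Distil", "Bert", "Model"].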
def camel_case_split(identifier):
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


def _center_text(text, width):
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent


def get_model_table_from_auto_modules():
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table


def check_model_table(overwrite=False):
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_table(args.fix_and_overwrite)
| 99 |
from __future__ import annotations

import queue


class TreeNode:
    def __init__(self, data: int) -> None:
        self.data = data
        self.right: TreeNode | None = None
        self.left: TreeNode | None = None


def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise


def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width: int = 50, char: str = "*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(prompt("Binary Tree Traversals"))

    node: TreeNode = build_tree()
    print(prompt("Pre Order Traversal"))
    pre_order(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal"))
    in_order(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal"))
    post_order(node)
    print(prompt() + "\n")

    print(prompt("Level Order Traversal"))
    level_order(node)
    print(prompt() + "\n")

    print(prompt("Actual Level Order Traversal"))
    level_order_actual(node)
    print("*" * 50 + "\n")

    print(prompt("Pre Order Traversal - Iteration Version"))
    pre_order_iter(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal - Iteration Version"))
    in_order_iter(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal - Iteration Version"))
    post_order_iter(node)
    print(prompt())
| 99 | 1 |
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )
    return encoder_config, decoder_config


def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")
        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"
    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            # fused qkv weights are split into separate query/key/value tensors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict


def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensors, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="naver-clova-ix/donut-base-finetuned-docvqa",
        required=False,
        type=str,
        help="Name of the original model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        required=False,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub.",
    )

    args = parser.parse_args()
    convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
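

# Example invocation (a sketch; the script filename and output path are
# illustrative, adjust them to your local checkout):
#   python convert_donut_to_pytorch.py \
#       --model_name naver-clova-ix/donut-base-finetuned-docvqa \
#       --pytorch_dump_folder_path ./donut-base-finetuned-docvqa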
| 342 |
from __future__ import annotations

import math
import random
from collections.abc import Collection
from typing import overload


class Vector:
    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)


def zero_vector(dimension: int) -> Vector:
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)


class Matrix:
    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n: int) -> Matrix:
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
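

# Minimal usage sketch (not part of the original module): exercises the Vector
# and Matrix classes defined above.
if __name__ == "__main__":
    v = Vector([1, 2, 3])
    w = Vector([3, 2, 1])
    print(v + w)  # component-wise sum: (4,4,4)
    print(v * w)  # dot product: 10
    m = Matrix([[1, 0], [0, 1]], 2, 2)
    print(m.determinant())  # identity matrix has determinant 1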
| 342 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 1 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_distilbert': [
'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'DistilBertConfig',
'DistilBertOnnxConfig',
],
'tokenization_distilbert': ['DistilBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_distilbert_fast'] = ['DistilBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_distilbert'] = [
'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DistilBertForMaskedLM',
'DistilBertForMultipleChoice',
'DistilBertForQuestionAnswering',
'DistilBertForSequenceClassification',
'DistilBertForTokenClassification',
'DistilBertModel',
'DistilBertPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_distilbert'] = [
'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDistilBertForMaskedLM',
'TFDistilBertForMultipleChoice',
'TFDistilBertForQuestionAnswering',
'TFDistilBertForSequenceClassification',
'TFDistilBertForTokenClassification',
'TFDistilBertMainLayer',
'TFDistilBertModel',
'TFDistilBertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_distilbert'] = [
'FlaxDistilBertForMaskedLM',
'FlaxDistilBertForMultipleChoice',
'FlaxDistilBertForQuestionAnswering',
'FlaxDistilBertForSequenceClassification',
'FlaxDistilBertForTokenClassification',
'FlaxDistilBertModel',
'FlaxDistilBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 65 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
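

# The scheduler below implements predictor-corrector sampling for a
# variance-exploding (VE) SDE: the predictor step integrates the reverse-time
# SDE one step backwards, and the corrector step applies score-based,
# Langevin-style updates at a fixed noise level. Method names (`step_pred`,
# `step_correct`, ...) are restored here by assumption from the standard
# diffusers scheduler layout.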
@dataclass
class SdeVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(
        self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None
    ):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
| 161 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_msn'] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 355 |
import json
import os
import re
import sys
import urllib.request

import requests
from bs4 import BeautifulSoup

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
    ' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'
}


def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        'q': query,
        'tbm': 'isch',
        'hl': 'en',
        'ijn': '0',
    }

    html = requests.get('https://www.google.com/search', params=params, headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    matched_images_data = ''.join(
        re.findall(r'AF_initDataCallback\(([^<]+)\);', str(soup.select('script')))
    )

    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)

    matched_google_image_data = re.findall(
        r'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",',
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0

    removed_matched_google_images_thumbnails = re.sub(
        r'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]',
        '',
        str(matched_google_image_data),
    )

    matched_google_full_resolution_images = re.findall(
        r'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]',
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, 'ascii').decode(
            'unicode-escape'
        )
        original_size_img = bytes(original_size_img_not_fixed, 'ascii').decode(
            'unicode-escape'
        )
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                'User-Agent',
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
                ' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582',
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index


if __name__ == "__main__":
    try:
        image_count = download_images_from_google_query(sys.argv[1])
        print(f'{image_count} images were downloaded to disk.')
    except IndexError:
        print('Please provide a search term.')
        raise
| 294 | 0 |
from typing import Any


def mode(input_list: list) -> list[Any]:
    """Return the mode(s) of a list, i.e. the most frequently occurring value(s).

    >>> mode([2, 2, 3, 4])
    [2]
    >>> mode([1, 2, 2, 3, 3])
    [2, 3]
    """
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 311 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow


if is_tf_available():
    import tensorflow as tf

if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer


TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"

if is_tf_available():

    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs


@require_tf
@require_keras_nlp
class GPT2TokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs

            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
| 311 | 1 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained('google/byt5-small')

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(R'^[ a-zA-Z]+$', t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + ' '
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = ' ' + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'])
        batch_without_eos_added = tokenizer(['hi', 'I went to the gym', ''])
        self.assertListEqual(batch_with_eos_added['input_ids'], batch_without_eos_added['input_ids'])

    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = 'Unicode €.'
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded['input_ids'], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, 'Unicode €.</s>')

        encoded = tokenizer('e è é ê ë')
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded['input_ids'], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, 'e è é ê ë</s>')

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë')), 'e è é ê ë</s>')

    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('input_ids', batch)
        self.assertIn('attention_mask', batch)
        self.assertNotIn('decoder_input_ids', batch)
        self.assertNotIn('decoder_attention_mask', batch)

    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            'Summary of the text.',
            'Another summary.',
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding='max_length', truncation=True, return_tensors=FRAMEWORK)
        self.assertEqual(32, targets['input_ids'].shape[1])

    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ['A long paragraph for summarization. </s>']
        tgt_text = ['Summary of the text. </s>']
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)
        self.assertEqual(expected_src_tokens, batch['input_ids'][0])
        self.assertEqual(expected_tgt_tokens, batch['labels'][0])
def SCREAMING_SNAKE_CASE__ ( self) -> str:
# safety check on max_len default value so we are sure the test works
SCREAMING_SNAKE_CASE = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}'''):
self.assertNotEqual(tokenizer.model_max_length , 42)
# Now let's start the test
SCREAMING_SNAKE_CASE = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}'''):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE = ' He is very happy, UNwant\u00E9d,running'
SCREAMING_SNAKE_CASE = tokenizer.encode(a , add_special_tokens=a)
tokenizer.save_pretrained(a)
SCREAMING_SNAKE_CASE = tokenizer.__class__.from_pretrained(a)
SCREAMING_SNAKE_CASE = after_tokenizer.encode(a , add_special_tokens=a)
self.assertListEqual(a , a)
shutil.rmtree(a)
SCREAMING_SNAKE_CASE = self.get_tokenizers(model_max_length=42)
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}'''):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'])
SCREAMING_SNAKE_CASE = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token')
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens})
SCREAMING_SNAKE_CASE = tokenizer.encode(a , add_special_tokens=a)
tokenizer.save_pretrained(a)
SCREAMING_SNAKE_CASE = tokenizer.__class__.from_pretrained(a)
SCREAMING_SNAKE_CASE = after_tokenizer.encode(a , add_special_tokens=a)
self.assertListEqual(a , a)
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens)
self.assertEqual(after_tokenizer.model_max_length , 42)
SCREAMING_SNAKE_CASE = tokenizer.__class__.from_pretrained(a , model_max_length=43)
self.assertEqual(tokenizer.model_max_length , 43)
shutil.rmtree(a)
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
SCREAMING_SNAKE_CASE = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a)
with open(os.path.join(a , 'special_tokens_map.json') , encoding='utf-8') as json_file:
SCREAMING_SNAKE_CASE = json.load(a)
with open(os.path.join(a , 'tokenizer_config.json') , encoding='utf-8') as json_file:
SCREAMING_SNAKE_CASE = json.load(a)
SCREAMING_SNAKE_CASE = [f'''<extra_id_{i}>''' for i in range(125)]
SCREAMING_SNAKE_CASE = added_tokens_extra_ids + [
'an_additional_special_token'
]
SCREAMING_SNAKE_CASE = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(a , 'special_tokens_map.json') , 'w' , encoding='utf-8') as outfile:
json.dump(a , a)
with open(os.path.join(a , 'tokenizer_config.json') , 'w' , encoding='utf-8') as outfile:
json.dump(a , a)
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
SCREAMING_SNAKE_CASE = tokenizer_class.from_pretrained(
a , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens)
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'])) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
SCREAMING_SNAKE_CASE = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=a)]
SCREAMING_SNAKE_CASE = tokenizer_class.from_pretrained(
a , additional_special_tokens=a , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens)
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'])) , )
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                tokenizer = tokenizer_class.from_pretrained(tmp_dir)
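                # ByT5 reserves ids 0-2 for special tokens and starts raw bytes at id 3,
                # so id 255 maps to a byte that is not valid UTF-8 on its own and decodes to ''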
self.assertTrue(tokenizer.decode([255]) == '')
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
pass
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
pass
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
pass
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
pass
    def test_convert_tokens_to_string_format( self ):
# The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
# and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True , do_lower_case=True)  # flag values assumed
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}'''):
                tokens = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string , str)
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
        tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}'''):
                attributes_list = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters , skip_special_tokens=False)
for attr in attributes_list:
                    setattr(tokenizer , attr + '_id' , None)
                    self.assertEqual(getattr(tokenizer , attr) , None)
                    self.assertEqual(getattr(tokenizer , attr + '_id') , None)
                    setattr(tokenizer , attr + '_id' , token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer , attr) , token_to_test_setters)
                    self.assertEqual(getattr(tokenizer , attr + '_id') , token_id_to_test_setters)
                setattr(tokenizer , 'additional_special_tokens_ids' , [])
                self.assertListEqual(getattr(tokenizer , 'additional_special_tokens') , [])
                self.assertListEqual(getattr(tokenizer , 'additional_special_tokens_ids') , [])
                setattr(tokenizer , 'additional_special_tokens_ids' , [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer , 'additional_special_tokens') , [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer , 'additional_special_tokens_ids') , [token_id_to_test_setters])
| 357 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
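# Nothing is imported eagerly here: _LazyModule resolves the names listed in
# _import_structure only on first attribute access, which keeps `import transformers` fast.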
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mluke'] = ['MLukeTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 327 | 0 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , image_size=64 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , backbone_featmap_shape=[1, 16, 4, 4] , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1
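        # e.g. the default image_size=64 yields a 2x2 backbone feature map, so
        # seq_length = (64 // 32) ** 2 + 1 = 5 positions including the [CLS] token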
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        backbone_config = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [4, 8, 16, 32],
'num_groups': 2,
}
        return ViTHybridConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=backbone_config , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = ViTHybridModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = ViTHybridModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTHybridConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
    def test_inputs_embeds( self ):
        pass
    def test_model_common_attributes( self ):
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_initialization( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [F'''{name}.{key}''' for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
    def test_model_from_pretrained( self ):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest( unittest.TestCase ):
@cached_property
    def default_image_processor( self ):
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head( self ):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-1.9_090, -0.4_993, -0.2_389] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
@slow
@require_accelerate
    def test_accelerate_inference( self ):
        image_processor = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' )
        model = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' )
        outputs = model(**inputs )
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1 ).item()
        self.assertEqual(model.config.id2label[predicted_class_idx] , 'tabby, tabby cat' )
| 195 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
UpperCAmelCase = logging.get_logger(__name__)
def normalize_box( box , width , height ):
    return [
        int(1000 * (box[0] / width) ),
        int(1000 * (box[1] / height) ),
        int(1000 * (box[2] / width) ),
        int(1000 * (box[3] / height) ),
    ]
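# e.g. normalize_box([100, 40, 150, 60], width=1000, height=500) -> [100, 80, 150, 120]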
def apply_tesseract( image , lang , tesseract_config ):
    # apply OCR
    pil_image = to_pil_image(image )
    image_width , image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image , lang=lang , output_type='dict' , config=tesseract_config )
    words , left , top , width , height = data['text'], data['left'], data['top'], data['width'], data['height']
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words ) if not word.strip()]
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left , top , width , height ):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box )
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box , image_width , image_height ) )
    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
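# Usage sketch (hypothetical blank image; requires the pytesseract backend):
#   words, boxes = apply_tesseract(np.zeros((32, 32, 3), dtype=np.uint8), lang=None, tesseract_config='')
#   every box comes back as [x0, y0, x1, y1] on the 0-1000 grid LayoutLM expects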
class LayoutLMv3ImageProcessor( BaseImageProcessor ):
    model_input_names = ["""pixel_values"""]
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , do_rescale = True , rescale_value = 1 / 255 , do_normalize = True , image_mean = None , image_std = None , apply_ocr = True , ocr_lang = None , tesseract_config = "" , **kwargs , ):
        super().__init__(**kwargs )
        size = size if size is not None else {'height': 224, 'width': 224}
        size = get_size_dict(size )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
    def resize( self , image , size , resample = PILImageResampling.BILINEAR , data_format = None , **kwargs , ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
        output_size = (size['height'], size['width'])
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format = None , **kwargs , ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , ):
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize = None , size = None , resample = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , apply_ocr = None , ocr_lang = None , tesseract_config = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('If do_normalize is True, image_mean and image_std must be specified.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self , 'pytesseract' )
            words_batch = []
            boxes_batch = []
            for image in images:
                words , boxes = apply_tesseract(image , ocr_lang , tesseract_config )
                words_batch.append(words )
                boxes_batch.append(boxes )
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = BatchFeature(data={'pixel_values': images} , tensor_type=return_tensors )
        if apply_ocr:
            data['words'] = words_batch
            data['boxes'] = boxes_batch
        return data
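# Minimal usage sketch (names as reconstructed above; a PIL document image assumed):
#   processor = LayoutLMv3ImageProcessor()
#   encoding = processor(images=document_image, return_tensors='np')
#   encoding['pixel_values'], encoding['words'], encoding['boxes']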
| 195 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
'''tokenization_tapas''': ['''TapasTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tapas'''] = [
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_tapas'''] = [
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 361 |
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    def __init__( self , parent , batch_size=2 , num_channels=3 , image_size=4 , patch_size=2 , text_seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=36 , num_hidden_layers=3 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , coordinate_size=6 , shape_size=6 , num_labels=3 , num_choices=4 , scope=None , range_bbox=1000 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
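        # e.g. the defaults give image_seq_length = (4 // 2) ** 2 + 1 = 5 and
        # seq_length = 7 + 5 = 12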
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        config = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ):
        model = LayoutLMvaModel(config=config )
        model.to(torch_device )
        model.eval()
        # text + image
        result = model(input_ids , pixel_values=pixel_values )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox , pixel_values=pixel_values , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox , pixel_values=pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        result = model(input_ids )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        result = model(pixel_values=pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
    def create_and_check_for_sequence_classification( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels ):
        model = LayoutLMvaForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class LayoutLMvaModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False
    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
    def setUp( self ):
        self.model_tester = LayoutLMvaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LayoutLMvaConfig , hidden_size=37 )
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = copy.deepcopy(inputs_dict )
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
            inputs_dict = {
                k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
                if isinstance(v , torch.Tensor ) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
                inputs_dict['labels'] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=torch_device )
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING ):
                inputs_dict['start_positions'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
                inputs_dict['end_positions'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ),
            ]:
                inputs_dict['labels'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING ),
            ]:
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=torch_device , )
        return inputs_dict
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
class LayoutLMvaModelIntegrationTest( unittest.TestCase ):
@cached_property
    def default_image_processor( self ):
        return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None
@slow
    def test_inference_no_head( self ):
        model = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors="""pt""" ).pixel_values.to(torch_device )
        input_ids = torch.tensor([[1, 2]] )
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
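        # one (x0, y0, x1, y1) box per input token, on the 0-1000 normalized grid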
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device ) , bbox=bbox.to(torch_device ) , pixel_values=pixel_values.to(torch_device ) , )
        # verify the logits
        expected_shape = torch.Size((1, 199, 768) )
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ) )
| 101 | 0 |
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer
def save_randomly_initialized_version( config_name : str , save_dir : str , **config_kwargs ):
    config = AutoConfig.from_pretrained(config_name , **config_kwargs )
    model = AutoModelForSeq2SeqLM.from_config(config )
    model.save_pretrained(save_dir )
    AutoTokenizer.from_pretrained(config_name ).save_pretrained(save_dir )
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
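# Example CLI invocation (hypothetical paths; extra flags land in **config_kwargs):
#   python save_randomly_initialized_version.py t5-small /tmp/t5-rand --d_model=64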
| 87 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_mask2former'] = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mask2former'] = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 312 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
'''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
'''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_trocr'''] = [
'''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrOCRForCausalLM''',
'''TrOCRPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 371 |
from ..utils import DummyObject, requires_backends
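# Each class below is a lightweight stand-in exposed when the `sentencepiece`
# backend is missing; `requires_backends` turns any attempted instantiation
# into a clear ImportError naming the missing dependency.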
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
class __lowerCAmelCase ( metaclass=__magic_name__ ):
"""simple docstring"""
snake_case_ = ['''sentencepiece''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['sentencepiece'] )
| 348 | 0 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module : nn.Module
    traced : List[nn.Module] = field(default_factory=list )
    handles : list = field(default_factory=list )
    def _forward_hook( self , m , inputs : Tensor , outputs : Tensor ):
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m , nn.Conv2d ) or isinstance(m , nn.BatchNorm2d )
        if has_not_submodules:
            self.traced.append(m )
    def __call__( self , x : Tensor ):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(x )
[x.remove() for x in self.handles]
return self
@property
    def parametrized( self ):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class ModuleTransfer:
    src : nn.Module
    dest : nn.Module
    verbose : int = 1
    src_skip : List = field(default_factory=list )
    dest_skip : List = field(default_factory=list )
    raise_if_mismatch : bool = True
    def __call__( self , x : Tensor ):
        dest_traced = Tracker(self.dest )(x ).parametrized
        src_traced = Tracker(self.src )(x ).parametrized
        src_traced = list(filter(lambda m : type(m ) not in self.src_skip , src_traced ) )
        dest_traced = list(filter(lambda m : type(m ) not in self.dest_skip , dest_traced ) )
        if len(dest_traced ) != len(src_traced ) and self.raise_if_mismatch:
            raise Exception(
                F'Numbers of operations are different. Source module has {len(src_traced )} operations while'
                F' destination module has {len(dest_traced )}.' )
        for dest_m, src_m in zip(dest_traced , src_traced ):
            dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F'Transfered from={src_m} to={dest_m}' )
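# Usage sketch: ModuleTransfer(src=source_model, dest=hf_model)(torch.randn(1, 3, 224, 224))
# copies weights layer-by-layer in the order both graphs execute them.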
class FakeRegNetVisslWrapper( nn.Module ):
    def __init__( self , model : nn.Module ):
        super().__init__()
        feature_blocks : List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(('''conv1''', model.stem) )
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith('''block''' ), F'Unexpected layer name {k}'
            block_index = len(feature_blocks ) + 1
            feature_blocks.append((F'res{block_index}', v) )
        self._feature_blocks = nn.ModuleDict(feature_blocks )
    def forward( self , x : Tensor ):
        return get_trunk_forward_outputs(
            x , out_feat_keys=None , feature_blocks=self._feature_blocks , )
class NameToFromModelFuncMap( dict ):
    def convert_name_to_timm( self , x : str ):
        x_split = x.split('''-''' )
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
    def __getitem__( self , x : str ):
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x )
            val = partial(lambda: (timm.create_model(x , pretrained=True ).eval(), None) )
        else:
            val = super().__getitem__(x )
        return val
class NameToOurModelFuncMap( dict ):
    def __getitem__( self , x : str ):
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head( from_state_dict , to_state_dict , keys ):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f'Copied key={from_key} to={to_key}' )
    return to_state_dict
def convert_weight_and_push( name , from_model_func , our_model_func , config , save_directory , push_to_hub = True , ):
    print(f'Converting {name}...' )
    with torch.no_grad():
        from_model , from_state_dict = from_model_func()
        our_model = our_model_func(config ).eval()
        # raise_if_mismatch=False is an assumption here: the vissl wrappers trace a different op count
        module_transfer = ModuleTransfer(src=from_model , dest=our_model , raise_if_mismatch=False )
        x = torch.randn((1, 3, 224, 224) )
        module_transfer(x )
        if from_state_dict is not None:
            keys = []
            # for seer - in1k finetuned we have to manually copy the head
            if "seer" in name and "in1k" in name:
                keys = [('''0.clf.0.weight''', '''classifier.1.weight'''), ('''0.clf.0.bias''', '''classifier.1.bias''')]
            to_state_dict = manually_copy_vissl_head(from_state_dict , our_model.state_dict() , keys )
            our_model.load_state_dict(to_state_dict )
        our_outputs = our_model(x , output_hidden_states=True )
        our_output = (
            our_outputs.logits if isinstance(our_model , RegNetForImageClassification ) else our_outputs.last_hidden_state
        )
        from_output = from_model(x )
        from_output = from_output[-1] if type(from_output ) is list else from_output
        # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
        if "seer" in name and "in1k" in name:
            our_output = our_outputs.hidden_states[-1]
        assert torch.allclose(from_output , our_output ), "The model logits don't match the original one."
        if push_to_hub:
            our_model.push_to_hub(
                repo_path_or_name=save_directory / name , commit_message='''Add model''' , use_temp_dir=True , )
            size = 224 if '''seer''' not in name else 384
            # we can use the convnext one
            image_processor = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' , size=size )
            image_processor.push_to_hub(
                repo_path_or_name=save_directory / name , commit_message='''Add image processor''' , use_temp_dir=True , )
            print(f'Pushed {name}' )
def convert_weights_and_push( save_directory , model_name = None , push_to_hub = True ):
    filename = '''imagenet-1k-id2label.json'''
    num_labels = 1_000
    expected_shape = (1, num_labels)
    repo_id = '''huggingface/label-files'''
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='''dataset''' ) ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(RegNetConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )
    names_to_config = {
'''regnet-x-002''': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type='''x''' ),
'''regnet-x-004''': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type='''x''' ),
'''regnet-x-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type='''x''' ),
'''regnet-x-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type='''x''' ),
'''regnet-x-016''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type='''x''' ),
'''regnet-x-032''': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1_008] , groups_width=48 , layer_type='''x''' ),
'''regnet-x-040''': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1_360] , groups_width=40 , layer_type='''x''' ),
'''regnet-x-064''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1_624] , groups_width=56 , layer_type='''x''' ),
'''regnet-x-080''': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1_920] , groups_width=120 , layer_type='''x''' ),
'''regnet-x-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2_240] , groups_width=112 , layer_type='''x''' ),
'''regnet-x-160''': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2_048] , groups_width=128 , layer_type='''x''' ),
'''regnet-x-320''': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1_344, 2_520] , groups_width=168 , layer_type='''x''' ),
# y variant
'''regnet-y-002''': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
'''regnet-y-004''': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
'''regnet-y-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
'''regnet-y-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
'''regnet-y-016''': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
'''regnet-y-032''': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1_512] , groups_width=24 ),
'''regnet-y-040''': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1_088] , groups_width=64 ),
'''regnet-y-064''': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1_296] , groups_width=72 ),
'''regnet-y-080''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2_016] , groups_width=56 ),
'''regnet-y-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2_240] , groups_width=112 ),
'''regnet-y-160''': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1_232, 3_024] , groups_width=112 ),
'''regnet-y-320''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'''regnet-y-320-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ),
'''regnet-y-640-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1_968, 4_920] , groups_width=328 ),
'''regnet-y-1280-seer''': RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1_056, 2_904, 7_392] , groups_width=264 ),
'''regnet-y-2560-seer''': RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1_696, 2_544, 5_088] , groups_width=640 ),
'''regnet-y-10b-seer''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2_020, 4_040, 11_110, 28_280] , groups_width=1_010 ),
# finetuned on imagenet
'''regnet-y-320-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ),
'''regnet-y-640-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1_968, 4_920] , groups_width=328 ),
'''regnet-y-1280-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1_056, 2_904, 7_392] , groups_width=264 ),
'''regnet-y-2560-seer-in1k''': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1_696, 2_544, 5_088] , groups_width=640 ),
'''regnet-y-10b-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2_020, 4_040, 11_110, 28_280] , groups_width=1_010 ),
}
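    # key convention: regnet-{x|y}-{gflops}[-seer[-in1k]]; the SEER checkpoints are
    # self-supervised pretrained (https://arxiv.org/abs/2202.08360), the -in1k ones
    # are additionally finetuned on ImageNet-1k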
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
    # add seer weights logic
    def load_using_classy_vision(checkpoint_url , model_func ) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url , model_dir=str(save_directory ) , map_location='''cpu''' )
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files['''classy_state_dict''']['''base_model''']['''model''']
        state_dict = model_state_dict['''trunk''']
        model.load_state_dict(state_dict )
        return model.eval(), model_state_dict["heads"]
# pretrained
lowerCAmelCase__ : int = partial(
_a , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
lowerCAmelCase__ : Optional[int] = partial(
_a , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
lowerCAmelCase__ : Optional[int] = partial(
_a , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
lowerCAmelCase__ : Tuple = partial(
_a , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch''' , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27 , group_width=1_010 , w_a=1_744 , w_a=6_20.83 , w_m=2.52 ) ) ) , )
# IN1K finetuned
lowerCAmelCase__ : List[Any] = partial(
_a , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
lowerCAmelCase__ : Optional[int] = partial(
_a , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
lowerCAmelCase__ : Union[str, Any] = partial(
_a , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
lowerCAmelCase__ : Union[str, Any] = partial(
_a , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch''' , lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27 , group_width=1_010 , w_a=1_744 , w_a=6_20.83 , w_m=2.52 ) ) ) , )
if model_name:
convert_weight_and_push(
_a , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , _a , _a , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
_a , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , _a , _a , _a , )
return config, expected_shape
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported regnet* architecture,'''
            ''' currently: regnetx-*, regnety-*. If `None`, all of them will be converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
lowerCamelCase = parser.parse_args()
lowerCamelCase = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
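
# Usage sketch for the script above. The script file name and the paths are
# assumptions, not taken from the repository:
#
#   python convert_regnet_to_pytorch.py \
#       --model_name regnet-y-040 \
#       --pytorch_dump_folder_path ./converted_regnets \
#       --push_to_hub False
#
# With --model_name left unset, every entry of names_to_config is converted in turn.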
| 131 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class MobileNetV2ImageProcessor(BaseImageProcessor):
    r"""
    Constructs an image processor that can resize, center-crop, rescale and normalize images,
    and post-process semantic segmentation outputs.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image so its shortest edge matches `size["shortest_edge"]`."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize an image with the given per-channel mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Apply the configured transformations to one image or a batch of images."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """Turn model logits into per-image semantic segmentation maps, optionally resized to `target_sizes`."""
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
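
# Usage sketch for the processor above (the image path is hypothetical; the output
# shape follows from the default 256 shortest-edge resize and 224x224 center crop):
#
#   from PIL import Image
#   processor = MobileNetV2ImageProcessor()
#   batch = processor(images=Image.open("cat.png"), return_tensors="pt")
#   batch["pixel_values"].shape  # torch.Size([1, 3, 224, 224])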
| 131 | 1 |
"""simple docstring"""
def nand_gate(input_1: int, input_2: int) -> int:
    """Return the NAND of the inputs: 0 only when both inputs are 1."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
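

# A short illustration of NAND's functional completeness: NOT and AND can be built
# from nand_gate alone. These helpers are an added sketch, not part of the original module.
def not_gate(input_1: int) -> int:
    # NOT a == a NAND a
    return nand_gate(input_1, input_1)


def and_gate(input_1: int, input_2: int) -> int:
    # a AND b == NOT (a NAND b)
    return not_gate(nand_gate(input_1, input_2))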
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 108 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class snake_case ( unittest.TestCase ):
def __init__( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : Any=7 , UpperCamelCase__ : List[str]=3 , UpperCamelCase__ : List[Any]=1_8 , UpperCamelCase__ : List[Any]=3_0 , UpperCamelCase__ : List[str]=4_0_0 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : int=[0.48145466, 0.4578275, 0.40821073] , UpperCamelCase__ : str=[0.26862954, 0.26130258, 0.27577711] , UpperCamelCase__ : List[str]=True , )-> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase: Dict = size if size is not None else {"height": 2_2_4, "width": 2_2_4}
__lowerCAmelCase: Union[str, Any] = crop_size if crop_size is not None else {"height": 1_8, "width": 1_8}
__lowerCAmelCase: Optional[int] = parent
__lowerCAmelCase: List[str] = batch_size
__lowerCAmelCase: Union[str, Any] = num_channels
__lowerCAmelCase: Optional[Any] = image_size
__lowerCAmelCase: Tuple = min_resolution
__lowerCAmelCase: List[str] = max_resolution
__lowerCAmelCase: List[Any] = do_resize
__lowerCAmelCase: Union[str, Any] = size
__lowerCAmelCase: List[Any] = do_center_crop
__lowerCAmelCase: Optional[int] = crop_size
__lowerCAmelCase: Dict = do_normalize
__lowerCAmelCase: List[str] = image_mean
__lowerCAmelCase: Optional[int] = image_std
__lowerCAmelCase: str = do_convert_rgb
def lowercase_ ( self : Tuple)-> str:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def lowercase_ ( self : Any , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : Dict=False)-> List[str]:
'''simple docstring'''
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
__lowerCAmelCase: Optional[int] = []
for i in range(self.batch_size):
image_inputs.append(
np.random.randint(
2_5_5 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta))
else:
__lowerCAmelCase: List[str] = []
for i in range(self.batch_size):
__lowerCAmelCase , __lowerCAmelCase: List[str] = np.random.choice(np.arange(self.min_resolution , self.max_resolution) , 2)
image_inputs.append(np.random.randint(2_5_5 , size=(self.num_channels, width, height) , dtype=np.uinta))
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
__lowerCAmelCase: Union[str, Any] = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1)) for x in image_inputs]
if torchify:
__lowerCAmelCase: str = [torch.from_numpy(UpperCamelCase__) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class snake_case ( __snake_case, unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : str = ChineseCLIPImageProcessor if is_vision_available() else None
def lowercase_ ( self : Any)-> List[Any]:
'''simple docstring'''
__lowerCAmelCase: Tuple = ChineseCLIPImageProcessingTester(self , do_center_crop=UpperCamelCase__)
@property
def lowercase_ ( self : Any)-> Optional[Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self : Union[str, Any])-> Optional[int]:
'''simple docstring'''
__lowerCAmelCase: Tuple = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(UpperCamelCase__ , "do_resize"))
self.assertTrue(hasattr(UpperCamelCase__ , "size"))
self.assertTrue(hasattr(UpperCamelCase__ , "do_center_crop"))
self.assertTrue(hasattr(UpperCamelCase__ , "center_crop"))
self.assertTrue(hasattr(UpperCamelCase__ , "do_normalize"))
self.assertTrue(hasattr(UpperCamelCase__ , "image_mean"))
self.assertTrue(hasattr(UpperCamelCase__ , "image_std"))
self.assertTrue(hasattr(UpperCamelCase__ , "do_convert_rgb"))
def lowercase_ ( self : List[Any])-> str:
'''simple docstring'''
__lowerCAmelCase: Tuple = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"height": 2_2_4, "width": 2_2_4})
self.assertEqual(image_processor.crop_size , {"height": 1_8, "width": 1_8})
__lowerCAmelCase: List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4)
self.assertEqual(image_processor.size , {"shortest_edge": 4_2})
self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4})
def lowercase_ ( self : List[str])-> Optional[int]:
'''simple docstring'''
pass
def lowercase_ ( self : Any)-> Optional[int]:
'''simple docstring'''
__lowerCAmelCase: int = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__lowerCAmelCase: Union[str, Any] = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase__)
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image)
# Test not batched input
__lowerCAmelCase: Optional[int] = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__lowerCAmelCase: int = image_processing(UpperCamelCase__ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowercase_ ( self : int)-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
__lowerCAmelCase: List[Any] = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__)
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , np.ndarray)
# Test not batched input
__lowerCAmelCase: List[Any] = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__lowerCAmelCase: Any = image_processing(UpperCamelCase__ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowercase_ ( self : int)-> str:
'''simple docstring'''
__lowerCAmelCase: Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
__lowerCAmelCase: Dict = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__)
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , torch.Tensor)
# Test not batched input
__lowerCAmelCase: Tuple = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__lowerCAmelCase: Optional[int] = image_processing(UpperCamelCase__ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
@require_torch
@require_vision
class snake_case ( __snake_case, unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : List[str] = ChineseCLIPImageProcessor if is_vision_available() else None
def lowercase_ ( self : int)-> Dict:
'''simple docstring'''
__lowerCAmelCase: Optional[int] = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=UpperCamelCase__)
__lowerCAmelCase: Union[str, Any] = 3
@property
def lowercase_ ( self : Union[str, Any])-> Any:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self : int)-> str:
'''simple docstring'''
__lowerCAmelCase: int = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(UpperCamelCase__ , "do_resize"))
self.assertTrue(hasattr(UpperCamelCase__ , "size"))
self.assertTrue(hasattr(UpperCamelCase__ , "do_center_crop"))
self.assertTrue(hasattr(UpperCamelCase__ , "center_crop"))
self.assertTrue(hasattr(UpperCamelCase__ , "do_normalize"))
self.assertTrue(hasattr(UpperCamelCase__ , "image_mean"))
self.assertTrue(hasattr(UpperCamelCase__ , "image_std"))
self.assertTrue(hasattr(UpperCamelCase__ , "do_convert_rgb"))
def lowercase_ ( self : Tuple)-> Any:
'''simple docstring'''
pass
def lowercase_ ( self : Tuple)-> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase: Any = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__lowerCAmelCase: int = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase__)
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image)
# Test not batched input
__lowerCAmelCase: List[Any] = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__lowerCAmelCase: Optional[int] = image_processing(UpperCamelCase__ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 108 | 1 |
'''simple docstring'''
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0):
        """Store an optional default key, used when no key is given to the methods."""
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        """XOR-encrypt `content` with `key`; returns the ciphertext as a list of chars."""
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        """XOR-decrypt a list of chars; identical to encrypt since XOR is self-inverse."""
        assert isinstance(key, int) and isinstance(content, list)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        """XOR-encrypt a string and return the result as a string."""
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        """XOR-decrypt a string and return the result as a string."""
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        """Encrypt `file` line by line into 'encrypt.out'; returns False on I/O errors."""
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))

        except OSError:
            return False

        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        """Decrypt `file` line by line into 'decrypt.out'; returns False on I/O errors."""
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))

        except OSError:
            return False

        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 2 |
'''simple docstring'''
from __future__ import annotations
def all_unique(data: list) -> bool:
    """Return True when every element of `data` is distinct."""
    return len(set(data)) == len(data)
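

# len(set(...)) above requires hashable elements; for lists of unhashable items
# (e.g. dicts) a quadratic, equality-based fallback works. An added sketch:
def all_unique_unhashable(data: list) -> bool:
    return all(data[i] != data[j] for i in range(len(data)) for j in range(i + 1, len(data)))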
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2 | 1 |
"""simple docstring"""
from __future__ import annotations

import csv

import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
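
# Usage sketch (requires network access; IMDb's markup changes over time, so the
# selectors in get_imdb_top_250_movies may need updating):
#   movies = get_imdb_top_250_movies()
#   print(sorted(movies.items(), key=lambda kv: kv[1], reverse=True)[:5])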
| 2 | """simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
__A = "<<<<<<< This should probably be modified because it mentions: "
__A = "=======\n>>>>>>>\n"
TO_HIGHLIGHT = [
"TextEncoderConfig",
"ByteTextEncoder",
"SubwordTextEncoder",
"encoder_config",
"maybe_build_from_corpus",
"manual_dir",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R"tfds\.core", R"datasets"),
(R"tf\.io\.gfile\.GFile", R"open"),
(R"tf\.([\w\d]+)", R"datasets.Value('\1')"),
(R"tfds\.features\.Text\(\)", R"datasets.Value('string')"),
(R"tfds\.features\.Text\(", R"datasets.Value('string'),"),
(R"features\s*=\s*tfds.features.FeaturesDict\(", R"features=datasets.Features("),
(R"tfds\.features\.FeaturesDict\(", R"dict("),
(R"The TensorFlow Datasets Authors", R"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
(R"tfds\.", R"datasets."),
(R"dl_manager\.manual_dir", R"self.config.data_dir"),
(R"self\.builder_config", R"self.config"),
]
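

# Worked example of how one TO_CONVERT rule rewrites a line (plain re.sub, nothing assumed):
#   >>> import re
#   >>> re.sub(r"tfds\.features\.Text\(\)", r"datasets.Value('string')",
#   ...        "'text': tfds.features.Text(),")
#   "'text': datasets.Value('string'),"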
def convert_command_factory(args: Namespace):
    """Build a ConvertCommand from the parsed command-line arguments."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)
class ConvertCommand(BaseDatasetsCLICommand):
    """CLI command that converts a TensorFlow Datasets dataset script to a HuggingFace Datasets one."""

    @staticmethod
    def register_subcommand(parser):
        train_parser = parser.add_parser(
            "convert", help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset."
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")

        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []

            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forgotten anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dataset_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dataset_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {utils_file} to {dest_folder}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
| 2 | 1 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """Build an n-qubit QFT circuit on |0...0>, measure it, and return the simulated counts."""
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be an exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate (>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10_000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
F"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
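# Sanity-check sketch: the QFT of |00...0> is a uniform superposition, so the measured
# counts should be spread roughly evenly over all 2**n bitstrings:
#   counts = quantum_fourier_transform(3)
#   assert set(counts) <= {f"{i:03b}" for i in range(8)}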
| 280 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class _A( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _A , _A=7 , _A=3 , _A=30 , _A=400 , _A=True , _A=None , _A=True , _A=[0.5, 0.5, 0.5] , _A=[0.5, 0.5, 0.5] , _A=True , _A=1 / 255 , _A=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__A : List[Any] = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
__A : Union[str, Any] = parent
__A : Optional[int] = batch_size
__A : int = num_channels
__A : int = min_resolution
__A : Any = max_resolution
__A : List[Any] = do_resize
__A : List[Any] = size
__A : Union[str, Any] = do_normalize
__A : Optional[int] = image_mean
__A : Optional[int] = image_std
__A : int = do_rescale
__A : str = rescale_factor
__A : Tuple = do_pad
def UpperCAmelCase_ ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCAmelCase_ ( self , _A , _A=False ):
if not batched:
__A : List[str] = image_inputs[0]
if isinstance(_A , Image.Image ):
__A , __A : int = image.size
else:
__A , __A : Any = image.shape[1], image.shape[2]
if w < h:
__A : List[Any] = int(self.size['shortest_edge'] * h / w )
__A : List[Any] = self.size['shortest_edge']
elif w > h:
__A : Union[str, Any] = self.size['shortest_edge']
__A : str = int(self.size['shortest_edge'] * w / h )
else:
__A : Dict = self.size['shortest_edge']
__A : str = self.size['shortest_edge']
else:
__A : int = []
for image in image_inputs:
__A , __A : Optional[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__A : List[str] = max(_A , key=lambda _A : item[0] )[0]
__A : str = max(_A , key=lambda _A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _A( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : List[str] = YolosImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self ):
__A : Dict = YolosImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self ):
__A : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_A , 'image_mean' ) )
self.assertTrue(hasattr(_A , 'image_std' ) )
self.assertTrue(hasattr(_A , 'do_normalize' ) )
self.assertTrue(hasattr(_A , 'do_resize' ) )
self.assertTrue(hasattr(_A , 'size' ) )
def UpperCAmelCase_ ( self ):
__A : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad , _A )
__A : Dict = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_A )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , _A )
def UpperCAmelCase_ ( self ):
pass
def UpperCAmelCase_ ( self ):
# Initialize image_processing
__A : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
for image in image_inputs:
self.assertIsInstance(_A , Image.Image )
# Test not batched input
__A : Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A , __A : Optional[Any] = self.image_processor_tester.get_expected_values(_A , batched=_A )
__A : str = image_processing(_A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ):
# Initialize image_processing
__A : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
for image in image_inputs:
self.assertIsInstance(_A , np.ndarray )
# Test not batched input
__A : str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__A , __A : List[Any] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : Tuple = image_processing(_A , return_tensors='pt' ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ):
# Initialize image_processing
__A : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test not batched input
__A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__A , __A : Union[str, Any] = self.image_processor_tester.get_expected_values(_A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : Optional[int] = image_processing(_A , return_tensors='pt' ).pixel_values
__A , __A : Optional[int] = self.image_processor_tester.get_expected_values(_A , batched=_A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self ):
# Initialize image_processings
__A : Tuple = self.image_processing_class(**self.image_processor_dict )
__A : Any = self.image_processing_class(do_resize=_A , do_normalize=_A , do_rescale=_A )
# create random PyTorch tensors
__A : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
for image in image_inputs:
self.assertIsInstance(_A , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
__A : Optional[int] = image_processing_a.pad(_A , return_tensors='pt' )
__A : Optional[int] = image_processing_a(_A , return_tensors='pt' )
self.assertTrue(
torch.allclose(encoded_images_with_method['pixel_values'] , encoded_images['pixel_values'] , atol=1e-4 ) )
@slow
def UpperCAmelCase_ ( self ):
# prepare image and target
__A : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
__A : Optional[Any] = json.loads(f.read() )
__A : Optional[Any] = {'image_id': 39769, 'annotations': target}
# encode them
__A : str = YolosImageProcessor.from_pretrained('hustvl/yolos-small' )
__A : List[Any] = image_processing(images=_A , annotations=_A , return_tensors='pt' )
# verify pixel values
__A : List[Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , _A )
__A : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _A , atol=1e-4 ) )
# verify area
__A : List[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _A ) )
# verify boxes
__A : Any = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _A )
__A : Optional[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _A , atol=1e-3 ) )
# verify image_id
__A : Optional[int] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _A ) )
# verify is_crowd
__A : str = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _A ) )
# verify class_labels
__A : Any = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _A ) )
# verify orig_size
__A : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _A ) )
# verify size
__A : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _A ) )
@slow
def UpperCAmelCase_ ( self ):
# prepare image, target and masks_path
__A : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
__A : Tuple = json.loads(f.read() )
__A : Any = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
__A : List[Any] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
__A : Any = YolosImageProcessor(format='coco_panoptic' )
__A : List[Any] = image_processing(images=_A , annotations=_A , masks_path=_A , return_tensors='pt' )
# verify pixel values
__A : Any = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , _A )
__A : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _A , atol=1e-4 ) )
# verify area
__A : int = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _A ) )
# verify boxes
__A : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _A )
__A : Optional[Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _A , atol=1e-3 ) )
# verify image_id
__A : Union[str, Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _A ) )
# verify is_crowd
__A : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _A ) )
# verify class_labels
__A : List[str] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _A ) )
# verify masks
__A : Tuple = 822873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _A )
# verify orig_size
__A : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _A ) )
# verify size
__A : int = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _A ) )
| 280 | 1 |
'''simple docstring'''
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class __lowercase ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
a : Optional[int] = BigBirdTokenizer
a : Union[str, Any] = BigBirdTokenizerFast
a : Union[str, Any] = True
a : Dict = True
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
super().setUp()
__lowercase = self.tokenizer_class(_lowerCamelCase ,keep_accents=_lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = '''<s>'''
__lowercase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ) ,_lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ) ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'''<unk>''' )
self.assertEqual(vocab_keys[1] ,'''<s>''' )
self.assertEqual(vocab_keys[-1] ,'''[MASK]''' )
self.assertEqual(len(_lowerCamelCase ) ,1004 )
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,1000 )
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__lowercase = self.get_tokenizer()
__lowercase = self.get_rust_tokenizer()
__lowercase = '''I was born in 92000, and this is falsé.'''
__lowercase = tokenizer.tokenize(_lowerCamelCase )
__lowercase = rust_tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
__lowercase = tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
__lowercase = rust_tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
__lowercase = self.get_rust_tokenizer()
__lowercase = tokenizer.encode(_lowerCamelCase )
__lowercase = rust_tokenizer.encode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = BigBirdTokenizer(_lowerCamelCase ,keep_accents=_lowerCamelCase )
__lowercase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_lowerCamelCase ,['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowerCamelCase ) ,[285, 46, 10, 170, 382] ,)
__lowercase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_lowerCamelCase ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] ,)
__lowercase = tokenizer.convert_tokens_to_ids(_lowerCamelCase )
self.assertListEqual(
_lowerCamelCase ,[8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] ,)
__lowercase = tokenizer.convert_ids_to_tokens(_lowerCamelCase )
self.assertListEqual(
_lowerCamelCase ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] ,)
@cached_property
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
@slow
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = '''Hello World!'''
__lowercase = [65, 18536, 2260, 101, 66]
self.assertListEqual(_lowerCamelCase ,self.big_tokenizer.encode(_lowerCamelCase ) )
@slow
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
__lowercase = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(_lowerCamelCase ,self.big_tokenizer.encode(_lowerCamelCase ) )
@require_torch
@slow
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
__lowercase = list(self.big_tokenizer.get_vocab().keys() )[:10]
__lowercase = ''' '''.join(_lowerCamelCase )
__lowercase = self.big_tokenizer.encode_plus(_lowerCamelCase ,return_tensors='''pt''' ,return_token_type_ids=_lowerCamelCase )
__lowercase = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] ,return_tensors='''pt''' ,return_token_type_ids=_lowerCamelCase )
__lowercase = BigBirdConfig(attention_type='''original_full''' )
__lowercase = BigBirdModel(_lowerCamelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_lowerCamelCase )
model(**_lowerCamelCase )
@slow
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
__lowercase = tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids )
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' )
@slow
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = {'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCamelCase ,model_name='''google/bigbird-roberta-base''' ,revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' ,)
| 217 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

HIDDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}
def _lowerCAmelCase ( lowerCamelCase_ : Dict ):
__lowercase = list(state_dict.keys() )
for name in state_dict_keys:
__lowercase = state_dict.pop(lowerCamelCase_ )
# emb -> embedding
if name.startswith('''emb.''' ):
__lowercase = name.replace('''emb.''' , '''embeddings.''' )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('''blocks.0.ln0''' ):
__lowercase = name.replace('''blocks.0.ln0''' , '''blocks.0.pre_ln''' )
# att -> attention
__lowercase = re.sub(r'''blocks\.(\d+)\.att''' , r'''blocks.\1.attention''' , lowerCamelCase_ )
# ffn -> feed_forward
__lowercase = re.sub(r'''blocks\.(\d+)\.ffn''' , r'''blocks.\1.feed_forward''' , lowerCamelCase_ )
# time_mix_k -> time_mix_key and reshape
if name.endswith('''.time_mix_k''' ):
__lowercase = name.replace('''.time_mix_k''' , '''.time_mix_key''' )
# time_mix_v -> time_mix_value and reshape
if name.endswith('''.time_mix_v''' ):
__lowercase = name.replace('''.time_mix_v''' , '''.time_mix_value''' )
        # time_mix_r -> time_mix_receptance and reshape
if name.endswith('''.time_mix_r''' ):
__lowercase = name.replace('''.time_mix_r''' , '''.time_mix_receptance''' )
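        # worked example of the renames above (hypothetical checkpoint key):
        # "blocks.3.att.time_mix_k" -> "blocks.3.attention.time_mix_key",
        # which the prefixing below turns into "rwkv.blocks.3.attention.time_mix_key"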
if name != "head.weight":
__lowercase = '''rwkv.''' + name
__lowercase = weight
return state_dict
def _lowerCAmelCase ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Any=False , lowerCamelCase_ : int=None ):
# 1. If possible, build the tokenizer.
if tokenizer_file is None:
print('''No `--tokenizer_file` provided, we will use the default tokenizer.''' )
__lowercase = 5_0_2_7_7
__lowercase = AutoTokenizer.from_pretrained('''EleutherAI/gpt-neox-20b''' )
else:
__lowercase = PreTrainedTokenizerFast(tokenizer_file=lowerCamelCase_ )
__lowercase = len(lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
# 2. Build the config
__lowercase = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
__lowercase = candidate
break
if size is None:
raise ValueError('''Could not infer the size, please provide it with the `--size` argument.''' )
if size not in possible_sizes:
raise ValueError(f"`size` should be one of {possible_sizes}, got {size}." )
__lowercase = RwkvConfig(
vocab_size=lowerCamelCase_ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(lowerCamelCase_ )
# 3. Download model file then convert state_dict
__lowercase = hf_hub_download(lowerCamelCase_ , lowerCamelCase_ )
__lowercase = torch.load(lowerCamelCase_ , map_location='''cpu''' )
__lowercase = convert_state_dict(lowerCamelCase_ )
# 4. Split in shards and save
__lowercase , __lowercase = shard_checkpoint(lowerCamelCase_ )
for shard_file, shard in shards.items():
torch.save(lowerCamelCase_ , os.path.join(lowerCamelCase_ , lowerCamelCase_ ) )
if index is not None:
__lowercase = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
# Save the index as well
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
__lowercase = json.dumps(lowerCamelCase_ , indent=2 , sort_keys=lowerCamelCase_ ) + '''\n'''
f.write(lowerCamelCase_ )
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        '''Cleaning up shards. This may error with an OOM error, if this is the case don\'t worry you still have converted the model.''' )
__lowercase = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
__lowercase = torch.load(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(lowerCamelCase_ , lowerCamelCase_ ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('''Please provide a `model_name` to push the model to the Hub.''' )
__lowercase = AutoModelForCausalLM.from_pretrained(lowerCamelCase_ )
model.push_to_hub(lowerCamelCase_ , max_shard_size='''2GB''' )
tokenizer.push_to_hub(lowerCamelCase_ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.'''
)
parser.add_argument(
'''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.'''
)
parser.add_argument(
'''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.'''
)
parser.add_argument(
'''--tokenizer_file''',
default=None,
type=str,
    help='''Path to the tokenizer file to use (if not provided, the default GPT-NeoX-20B tokenizer is used).''',
)
parser.add_argument(
'''--size''',
default=None,
type=str,
help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Push to the Hub the converted model.''',
)
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''Name of the pushed model on the Hub, including the username / organization.''',
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 217 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
SCREAMING_SNAKE_CASE_: int =logging.get_logger(__name__)
class __A ( UpperCamelCase__ ):
def __init__(self : Optional[int] , *__a : Optional[Any] , **__a : Dict ):
warnings.warn(
"The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DonutImageProcessor instead." , __a , )
super().__init__(*__a , **__a )
| 1 | '''simple docstring'''
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : int ) -> str:
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError("the value of both inputs must be positive" )
UpperCAmelCase_ = str(bin(snake_case_ ) )[2:] # remove the leading "0b"
UpperCAmelCase_ = str(bin(snake_case_ ) )[2:]
UpperCAmelCase_ = max(len(snake_case_ ) , len(snake_case_ ) )
return "0b" + "".join(
str(int("1" in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(snake_case_ ) , b_binary.zfill(snake_case_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 | 1 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
_UpperCamelCase = (3, 9, -11, 0, 7, 5, 1, -1)
_UpperCamelCase = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
class lowercase :
'''simple docstring'''
def __init__(self , __a ) -> None:
"""simple docstring"""
UpperCAmelCase__ = None
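        # iterate the input in descending order, inserting each new node at the
        # head, so the finished list ends up sorted in ascending order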
for i in sorted(__a , reverse=__a ):
UpperCAmelCase__ = Node(__a , self.head )
def __iter__(self ) -> Iterator[int]:
"""simple docstring"""
UpperCAmelCase__ = self.head
while node:
yield node.data
UpperCAmelCase__ = node.next_node
def __len__(self ) -> int:
"""simple docstring"""
return sum(1 for _ in self )
def __str__(self ) -> str:
"""simple docstring"""
return " -> ".join([str(__a ) for node in self] )
def UpperCamelCase_( snake_case__: SortedLinkedList , snake_case__: SortedLinkedList ) -> SortedLinkedList:
return SortedLinkedList(list(snake_case__ ) + list(snake_case__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCamelCase = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 335 |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be trained."""} )
__SCREAMING_SNAKE_CASE = field(
default="""./""" , metadata={"""help""": """Save dir where model repo is cloned and models updates are saved to."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot-clean-train""" , metadata={"""help""": """Name or path of training dataset."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot-clean-valid""" , metadata={"""help""": """Name or path of validation dataset."""} )
__SCREAMING_SNAKE_CASE = field(default=2 , metadata={"""help""": """Batch size for training."""} )
__SCREAMING_SNAKE_CASE = field(default=2 , metadata={"""help""": """Batch size for evaluation."""} )
__SCREAMING_SNAKE_CASE = field(default=0.1 , metadata={"""help""": """Value of weight decay."""} )
__SCREAMING_SNAKE_CASE = field(
default=10000 , metadata={"""help""": """Size of buffer used to shuffle streaming dataset."""} )
    __SCREAMING_SNAKE_CASE = field(default=2E-4 , metadata={"""help""": """Learning rate for training."""} )
    __SCREAMING_SNAKE_CASE = field(default="""cosine""" , metadata={"""help""": """Learning rate scheduler type."""} )
__SCREAMING_SNAKE_CASE = field(
default=750 , metadata={"""help""": """Number of warmup steps in the learning rate schedule."""} )
__SCREAMING_SNAKE_CASE = field(
default=16 , metadata={"""help""": """Number of gradient accumulation steps."""} )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={"""help""": """Use gradient checkpointing to reduce memory footprint."""} )
__SCREAMING_SNAKE_CASE = field(default=50000 , metadata={"""help""": """Maximum number of training steps."""} )
__SCREAMING_SNAKE_CASE = field(
default=-1 , metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""} )
__SCREAMING_SNAKE_CASE = field(default=1024 , metadata={"""help""": """Sequence lengths used for training."""} )
__SCREAMING_SNAKE_CASE = field(default=1 , metadata={"""help""": """Training seed."""} )
__SCREAMING_SNAKE_CASE = field(
default=1024 , metadata={"""help""": """Interval to save checkpoints. Measured as number of forward passes not training steps."""} , )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={"""help""": """States path if the training should continue from a checkpoint folder."""} )
__SCREAMING_SNAKE_CASE = field(default=_UpperCamelCase , metadata={"""help""": """If True the data is pretokenized."""} )
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be evaluated."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot-clean-valid""" , metadata={"""help""": """Name or path of validation dataset."""} )
__SCREAMING_SNAKE_CASE = field(default=2 , metadata={"""help""": """Batch size used for evaluation."""} )
__SCREAMING_SNAKE_CASE = field(
default=-1 , metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""} )
__SCREAMING_SNAKE_CASE = field(default=1024 , metadata={"""help""": """Length of sequences to be evaluated."""} )
__SCREAMING_SNAKE_CASE = field(default=1 , metadata={"""help""": """Random seed used for evaluation."""} )
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be evaluated."""} )
__SCREAMING_SNAKE_CASE = field(default=_UpperCamelCase , metadata={"""help""": """Number of workers used for code evaluation."""} )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={"""help""": """The number of human-eval tasks to run. If not included all tasks are evaluated."""} , )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={"""help""": """Sample from the language model's output distribution."""} )
__SCREAMING_SNAKE_CASE = field(default=0.2 , metadata={"""help""": """Sampling temperature used for generation."""} )
__SCREAMING_SNAKE_CASE = field(default=256 , metadata={"""help""": """Maximum number of newly generated tokens."""} )
__SCREAMING_SNAKE_CASE = field(default=0 , metadata={"""help""": """Top-k parameter used for generation."""} )
__SCREAMING_SNAKE_CASE = field(default=0.95 , metadata={"""help""": """Top-p parameter used for nucleus sampling."""} )
__SCREAMING_SNAKE_CASE = field(default=10 , metadata={"""help""": """Number of generations to run in parallel."""} )
__SCREAMING_SNAKE_CASE = field(
default=200 , metadata={"""help""": """Number of completions to generate for each sample."""} )
__SCREAMING_SNAKE_CASE = field(default=1 , metadata={"""help""": """Random seed used for evaluation."""} )
__SCREAMING_SNAKE_CASE = field(
default="""eval_results.json""" , metadata={"""help""": """Random seed used for evaluation."""} )
__SCREAMING_SNAKE_CASE = field(
default="""0""" , metadata={"""help""": """Allow `code_eval` to execute Python code on machine"""} )
__SCREAMING_SNAKE_CASE = field(
default=-1 , metadata={
"""help""": (
"""Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"""
""" number corresponds to which GPU device id to run on."""
)
} , )
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={
"""help""": """The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."""
} , )
__SCREAMING_SNAKE_CASE = field(
default="""transformersbook/codeparrot""" , metadata={"""help""": """Folder or name of dataset to process."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot-clean""" , metadata={"""help""": """Folder to save processed processed dataset."""} )
__SCREAMING_SNAKE_CASE = field(
default=100000 , metadata={"""help""": """Number of files to save per JSON output file."""} )
__SCREAMING_SNAKE_CASE = field(default="""content""" , metadata={"""help""": """Column containing text data to process."""} )
__SCREAMING_SNAKE_CASE = field(
default=1000 , metadata={"""help""": """Maximum line length in file, otherwise file is filtered."""} )
__SCREAMING_SNAKE_CASE = field(
default=100 , metadata={"""help""": """Maximum mean line length in file, otherwise file is filtered."""} )
__SCREAMING_SNAKE_CASE = field(
default=0.25 , metadata={"""help""": """Maximum fraction of non-alphanumeric characters, otherwise file is filtered."""} )
__SCREAMING_SNAKE_CASE = field(
default=1.5 , metadata={"""help""": """Minimum character token ratio for the file, otherwise file is filtered."""} )
__SCREAMING_SNAKE_CASE = field(
default=0.7 , metadata={"""help""": """Probability for filtering config, test and uncommon files."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Name or path to the tokenizer."""} , )
__SCREAMING_SNAKE_CASE = field(
default=_UpperCamelCase , metadata={"""help""": """If True, near-duplicate samples are removed."""} )
__SCREAMING_SNAKE_CASE = field(
default=0.85 , metadata={"""help""": """Jaccard threshold for near-duplicate samples."""} )
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default="""gpt2""" , metadata={"""help""": """Base tokenizer to build new tokenizer from."""} )
__SCREAMING_SNAKE_CASE = field(
default="""transformersbook/codeparrot-train""" , metadata={"""help""": """Dataset to train tokenizer on."""} )
__SCREAMING_SNAKE_CASE = field(default="""content""" , metadata={"""help""": """Column containing text data to process."""} )
__SCREAMING_SNAKE_CASE = field(default=200000 , metadata={"""help""": """Number of examples to train tokenizer on."""} )
__SCREAMING_SNAKE_CASE = field(
        default=32768 , metadata={"""help""": """Vocabulary size of the new tokenizer."""} )
__SCREAMING_SNAKE_CASE = field(default="""codeparrot""" , metadata={"""help""": """Name of new tokenizer."""} )
__SCREAMING_SNAKE_CASE = field(default=_UpperCamelCase , metadata={"""help""": """Push saved tokenizer to the hub."""} )
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Name or path to the tokenizer."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot-clean-train""" , metadata={"""help""": """Name or path to the dataset to pretokenize."""} )
__SCREAMING_SNAKE_CASE = field(
default="""tokenized-codeparrot-train""" , metadata={"""help""": """Repo name of the pretokenized data."""} )
__SCREAMING_SNAKE_CASE = field(default=_UpperCamelCase , metadata={"""help""": """Number of workers used for code evaluation."""} )
@dataclass
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
default="""gpt2-large""" , metadata={"""help""": """Configuration to use for model initialization."""} )
__SCREAMING_SNAKE_CASE = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Tokenizer attached to model."""} )
__SCREAMING_SNAKE_CASE = field(default="""codeparrot""" , metadata={"""help""": """Name of the created model."""} )
__SCREAMING_SNAKE_CASE = field(default=_UpperCamelCase , metadata={"""help""": """Push saved tokenizer to the hub."""} )
| 335 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCamelCase ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
A_ : Tuple = XLMTokenizer
A_ : List[str] = False
def _UpperCAmelCase ( self ) -> Union[str, Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_a = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
_a = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
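        # BPE merge rules in "left right freq" form; only the token pair in the
        # first two fields matters here, the trailing numbers are ignored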
_a = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(UpperCAmelCase__ ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(UpperCAmelCase__ ) )
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Optional[Any]:
_a = """lower newer"""
_a = """lower newer"""
return input_text, output_text
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = XLMTokenizer(self.vocab_file , self.merges_file )
_a = """lower"""
_a = ["""low""", """er</w>"""]
_a = tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
_a = tokens + ["""<unk>"""]
_a = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , UpperCAmelCase__ )
@slow
def _UpperCAmelCase ( self ) -> Dict:
_a = XLMTokenizer.from_pretrained('''xlm-mlm-en-2048''' )
_a = tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCAmelCase__ )
_a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCAmelCase__ )
_a = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ )
_a = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1] | 320 |
"""simple docstring"""
from __future__ import annotations
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
print(F"""Vertex\tShortest Distance from vertex {src}""" )
for i, d in enumerate(UpperCamelCase__ ):
print(F"""{i}\t\t{d}""" )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
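    # after bellman_ford has relaxed every edge |V| - 1 times, any edge that
    # can still be relaxed must lie on a negative-weight cycle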
for j in range(UpperCamelCase__ ):
_a , _a , _a : List[str] = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
return True
return False
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
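    # relax every edge |V| - 1 times: a shortest path without cycles uses at
    # most |V| - 1 edges, so the distances have converged after that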
_a : Dict = [float("""inf""" )] * vertex_count
_a : Any = 0.0
for _ in range(vertex_count - 1 ):
for j in range(UpperCamelCase__ ):
_a , _a , _a : List[Any] = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
_a : Any = distance[u] + w
_a : Union[str, Any] = check_negative_cycle(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if negative_cycle_exists:
raise Exception("""Negative cycle found""" )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = int(input('Enter number of vertices: ').strip())
_snake_case = int(input('Enter number of edges: ').strip())
_snake_case = [{} for _ in range(E)]
for i in range(E):
print('Edge ', i + 1)
_snake_case , _snake_case , _snake_case = (
int(x)
for x in input('Enter source, destination, weight: ').strip().split(' ')
)
_snake_case = {'src': src, 'dst': dest, 'weight': weight}
_snake_case = int(input('\nEnter shortest path source:').strip())
_snake_case = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 294 | 0 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
__lowercase = '''bert-base-cased'''
__lowercase = '''google/pegasus-xsum'''
__lowercase = [''' Sam ate lunch today.''', '''Sams lunch ingredients.''']
__lowercase = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee''']
__lowercase = '''patrickvonplaten/t5-tiny-random'''
__lowercase = '''sshleifer/bart-tiny-random'''
__lowercase = '''sshleifer/tiny-mbart'''
__lowercase = '''sshleifer/tiny-marian-en-de'''
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :str = '''\n'''.join(SCREAMING_SNAKE_CASE )
Path(SCREAMING_SNAKE_CASE ).open('''w''' ).writelines(SCREAMING_SNAKE_CASE )
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(SCREAMING_SNAKE_CASE , f"""{split}.source""" ) , SCREAMING_SNAKE_CASE )
_dump_articles(os.path.join(SCREAMING_SNAKE_CASE , f"""{split}.target""" ) , SCREAMING_SNAKE_CASE )
return tmp_dir
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def UpperCamelCase__ ( self , __lowercase) -> List[Any]:
__UpperCamelCase :Dict = AutoTokenizer.from_pretrained(__lowercase)
__UpperCamelCase :Optional[Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
__UpperCamelCase :List[Any] = max(len(tokenizer.encode(__lowercase)) for a in ARTICLES)
__UpperCamelCase :Optional[int] = max(len(tokenizer.encode(__lowercase)) for a in SUMMARIES)
__UpperCamelCase :int = 4
__UpperCamelCase :Any = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
__UpperCamelCase , __UpperCamelCase :Tuple = '''ro_RO''', '''de_DE''' # ignored for all but mbart, but never causes error.
__UpperCamelCase :str = SeqaSeqDataset(
__lowercase , data_dir=__lowercase , type_path='''train''' , max_source_length=__lowercase , max_target_length=__lowercase , src_lang=__lowercase , tgt_lang=__lowercase , )
__UpperCamelCase :Any = DataLoader(__lowercase , batch_size=2 , collate_fn=train_dataset.collate_fn)
for batch in dataloader:
assert isinstance(__lowercase , __lowercase)
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
__UpperCamelCase :Optional[int] = shift_tokens_right(batch['''labels'''] , tokenizer.pad_token_id)
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED])
def UpperCamelCase__ ( self , __lowercase) -> int:
__UpperCamelCase :Union[str, Any] = AutoTokenizer.from_pretrained(__lowercase)
__UpperCamelCase :Union[str, Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
__UpperCamelCase :int = max(len(tokenizer.encode(__lowercase)) for a in ARTICLES)
__UpperCamelCase :Dict = max(len(tokenizer.encode(__lowercase)) for a in SUMMARIES)
__UpperCamelCase :Union[str, Any] = 4
__UpperCamelCase :List[str] = LegacySeqaSeqDataset(
__lowercase , data_dir=__lowercase , type_path='''train''' , max_source_length=20 , max_target_length=__lowercase , )
__UpperCamelCase :Dict = DataLoader(__lowercase , batch_size=2 , collate_fn=train_dataset.collate_fn)
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def UpperCamelCase__ ( self) -> Dict:
__UpperCamelCase :List[Any] = AutoTokenizer.from_pretrained('''facebook/mbart-large-cc25''')
__UpperCamelCase :Union[str, Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
__UpperCamelCase :str = tmp_dir.joinpath('''train.source''').open().readlines()
__UpperCamelCase :int = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
pack_data_dir(__lowercase , __lowercase , 128 , __lowercase)
__UpperCamelCase :Union[str, Any] = {x.name for x in tmp_dir.iterdir()}
__UpperCamelCase :int = {x.name for x in save_dir.iterdir()}
__UpperCamelCase :Optional[int] = save_dir.joinpath('''train.source''').open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(__lowercase) < len(__lowercase)
assert len(__lowercase) == 1
assert len(packed_examples[0]) == sum(len(__lowercase) for x in orig_examples)
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='''This test requires fairseq''')
def UpperCamelCase__ ( self) -> List[Any]:
if not FAIRSEQ_AVAILABLE:
return
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Any = self._get_dataset(max_len=64)
__UpperCamelCase :Union[str, Any] = 64
__UpperCamelCase :Tuple = ds.make_dynamic_sampler(__lowercase , required_batch_size_multiple=__lowercase)
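        # the dynamic sampler packs batches by token budget rather than by a
        # fixed batch size, so batch lengths should vary (checked below)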
__UpperCamelCase :List[str] = [len(__lowercase) for x in batch_sampler]
assert len(set(__lowercase)) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(__lowercase) == len(__lowercase) # no dropped or added examples
__UpperCamelCase :int = DataLoader(__lowercase , batch_sampler=__lowercase , collate_fn=ds.collate_fn , num_workers=2)
__UpperCamelCase :List[str] = []
__UpperCamelCase :int = []
for batch in data_loader:
__UpperCamelCase :List[Any] = batch['''input_ids'''].shape
__UpperCamelCase :Dict = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
__UpperCamelCase :Optional[int] = np.product(batch['''input_ids'''].shape)
num_src_per_batch.append(__lowercase)
if num_src_tokens > (max_tokens * 1.1):
failures.append(__lowercase)
assert num_src_per_batch[0] == max(__lowercase)
if failures:
raise AssertionError(f"""too many tokens in {len(__lowercase)} batches""")
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Optional[Any] = self._get_dataset(max_len=512)
__UpperCamelCase :Any = 2
__UpperCamelCase :List[Any] = ds.make_sortish_sampler(__lowercase , shuffle=__lowercase)
__UpperCamelCase :List[Any] = DataLoader(__lowercase , batch_size=__lowercase , collate_fn=ds.collate_fn , num_workers=2)
__UpperCamelCase :Tuple = DataLoader(__lowercase , batch_size=__lowercase , collate_fn=ds.collate_fn , num_workers=2 , sampler=__lowercase)
__UpperCamelCase :int = tokenizer.pad_token_id
def count_pad_tokens(__lowercase , __lowercase="input_ids"):
return [batch[k].eq(__lowercase).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(__lowercase , k='''labels''')) < sum(count_pad_tokens(__lowercase , k='''labels'''))
assert sum(count_pad_tokens(__lowercase)) < sum(count_pad_tokens(__lowercase))
assert len(__lowercase) == len(__lowercase)
def UpperCamelCase__ ( self , __lowercase=1_000 , __lowercase=128) -> List[Any]:
if os.getenv('''USE_REAL_DATA''' , __lowercase):
__UpperCamelCase :Optional[Any] = '''examples/seq2seq/wmt_en_ro'''
__UpperCamelCase :Dict = max_len * 2 * 64
if not Path(__lowercase).joinpath('''train.len''').exists():
save_len_file(__lowercase , __lowercase)
else:
__UpperCamelCase :Union[str, Any] = '''examples/seq2seq/test_data/wmt_en_ro'''
__UpperCamelCase :Optional[int] = max_len * 4
save_len_file(__lowercase , __lowercase)
__UpperCamelCase :str = AutoTokenizer.from_pretrained(__lowercase)
__UpperCamelCase :List[Any] = SeqaSeqDataset(
__lowercase , data_dir=__lowercase , type_path='''train''' , max_source_length=__lowercase , max_target_length=__lowercase , n_obs=__lowercase , )
return ds, max_tokens, tokenizer
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :List[Any] = self._get_dataset()
__UpperCamelCase :List[str] = set(DistributedSortishSampler(__lowercase , 256 , num_replicas=2 , rank=0 , add_extra_examples=__lowercase))
__UpperCamelCase :Tuple = set(DistributedSortishSampler(__lowercase , 256 , num_replicas=2 , rank=1 , add_extra_examples=__lowercase))
assert idsa.intersection(__lowercase) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def UpperCamelCase__ ( self , __lowercase) -> List[Any]:
__UpperCamelCase :List[Any] = AutoTokenizer.from_pretrained(__lowercase , use_fast=__lowercase)
if tok_name == MBART_TINY:
__UpperCamelCase :Optional[Any] = SeqaSeqDataset(
__lowercase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) , type_path='''train''' , max_source_length=4 , max_target_length=8 , src_lang='''EN''' , tgt_lang='''FR''' , )
__UpperCamelCase :Tuple = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
__UpperCamelCase :Tuple = SeqaSeqDataset(
__lowercase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) , type_path='''train''' , max_source_length=4 , max_target_length=8 , )
__UpperCamelCase :Optional[int] = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(__lowercase) == 1 if tok_name == BART_TINY else len(__lowercase) == 0
| 105 | import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Optional[int] = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
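    # build an output projection that reuses the embedding matrix as its
    # weight, i.e. a weight-tied LM head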
__UpperCamelCase , __UpperCamelCase :List[str] = emb.weight.shape
__UpperCamelCase :str = nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , bias=SCREAMING_SNAKE_CASE )
__UpperCamelCase :Any = emb.weight.data
return lin_layer
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Dict = torch.load(SCREAMING_SNAKE_CASE , map_location='''cpu''' )
__UpperCamelCase :Tuple = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
__UpperCamelCase :Dict = mam_aaa['''model''']
remove_ignore_keys_(SCREAMING_SNAKE_CASE )
__UpperCamelCase :Dict = state_dict['''encoder.embed_tokens.weight'''].shape[0]
__UpperCamelCase :Dict = MaMaaaConfig(
vocab_size=SCREAMING_SNAKE_CASE , max_position_embeddings=1_024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , )
__UpperCamelCase :Tuple = state_dict['''decoder.embed_tokens.weight''']
__UpperCamelCase :int = MaMaaaForConditionalGeneration(SCREAMING_SNAKE_CASE )
model.model.load_state_dict(SCREAMING_SNAKE_CASE , strict=SCREAMING_SNAKE_CASE )
__UpperCamelCase :Optional[Any] = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
__lowercase = parser.parse_args()
    __lowercase = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 105 | 1 |
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( a , unittest.TestCase):
lowerCamelCase__ = FunnelTokenizer
lowerCamelCase__ = FunnelTokenizerFast
lowerCamelCase__ = True
lowerCamelCase__ = True
def snake_case__ ( self):
'''simple docstring'''
super().setUp()
_lowerCAmelCase : Any = [
"<unk>",
"<cls>",
"<sep>",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_lowerCAmelCase : str = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def snake_case__ ( self, **__a):
'''simple docstring'''
return FunnelTokenizer.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = "UNwant\u00E9d,running"
_lowerCAmelCase : Tuple = "unwanted, running"
return input_text, output_text
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.tokenizer_class(self.vocab_file)
_lowerCAmelCase : Dict = tokenizer.tokenize("UNwant\u00E9d,running")
self.assertListEqual(__a, ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a), [7, 4, 5, 10, 8, 9])
def snake_case__ ( self):
'''simple docstring'''
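        # FunnelTokenizer marks the [CLS] token with token_type_id 2 (instead
        # of BERT's 0), so the expected type ids start with a single 2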
_lowerCAmelCase : Union[str, Any] = self.get_tokenizers(do_lower_case=__a)
for tokenizer in tokenizers:
_lowerCAmelCase : Optional[Any] = tokenizer("UNwant\u00E9d,running")
_lowerCAmelCase : Union[str, Any] = len(inputs["input_ids"]) - 1
self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)
_lowerCAmelCase : Any = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
| 36 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class A__ :
def __init__( self , A_ , A_ , A_ , A_ , A_ , A_=0.2 , A_=0.2 ):
'''simple docstring'''
UpperCamelCase : int = bp_numa
UpperCamelCase : int = bp_numa
UpperCamelCase : List[Any] = bp_numa
UpperCamelCase : Optional[int] = conva_get[:2]
UpperCamelCase : Optional[Any] = conva_get[2]
UpperCamelCase : Dict = size_pa
UpperCamelCase : Union[str, Any] = rate_w
UpperCamelCase : Dict = rate_t
UpperCamelCase : Union[str, Any] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
UpperCamelCase : Any = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
UpperCamelCase : List[Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
UpperCamelCase : Optional[Any] = -2 * np.random.rand(self.conva[1] ) + 1
UpperCamelCase : Any = -2 * np.random.rand(self.num_bpa ) + 1
UpperCamelCase : int = -2 * np.random.rand(self.num_bpa ) + 1
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
UpperCamelCase : Optional[int] = {
"num_bp1": self.num_bpa,
"num_bp2": self.num_bpa,
"num_bp3": self.num_bpa,
"conv1": self.conva,
"step_conv1": self.step_conva,
"size_pooling1": self.size_poolinga,
"rate_weight": self.rate_weight,
"rate_thre": self.rate_thre,
"w_conv1": self.w_conva,
"wkj": self.wkj,
"vji": self.vji,
"thre_conv1": self.thre_conva,
"thre_bp2": self.thre_bpa,
"thre_bp3": self.thre_bpa,
}
with open(A_ , "wb" ) as f:
pickle.dump(A_ , A_ )
print(F"""Model saved: {save_path}""" )
@classmethod
def __UpperCamelCase( cls , A_ ):
'''simple docstring'''
with open(A_ , "rb" ) as f:
UpperCamelCase : Optional[Any] = pickle.load(A_ ) # noqa: S301
UpperCamelCase : List[Any] = model_dic.get("conv1" )
conv_get.append(model_dic.get("step_conv1" ) )
UpperCamelCase : Union[str, Any] = model_dic.get("size_pooling1" )
UpperCamelCase : List[Any] = model_dic.get("num_bp1" )
UpperCamelCase : Dict = model_dic.get("num_bp2" )
UpperCamelCase : Dict = model_dic.get("num_bp3" )
UpperCamelCase : Dict = model_dic.get("rate_weight" )
UpperCamelCase : str = model_dic.get("rate_thre" )
# create model instance
UpperCamelCase : Any = CNN(A_ , A_ , A_ , A_ , A_ , A_ , A_ )
# modify model parameter
UpperCamelCase : str = model_dic.get("w_conv1" )
UpperCamelCase : Optional[Any] = model_dic.get("wkj" )
UpperCamelCase : int = model_dic.get("vji" )
UpperCamelCase : Any = model_dic.get("thre_conv1" )
UpperCamelCase : Optional[int] = model_dic.get("thre_bp2" )
UpperCamelCase : Union[str, Any] = model_dic.get("thre_bp3" )
return conv_ins
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
return 1 / (1 + np.exp(-1 * x ))
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
return round(A_ , 3 )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : str = convs[0]
UpperCamelCase : Optional[Any] = convs[1]
UpperCamelCase : Optional[Any] = np.shape(A_ )[0]
# get the data slice of original image data, data_focus
UpperCamelCase : List[str] = []
for i_focus in range(0 , size_data - size_conv + 1 , A_ ):
for j_focus in range(0 , size_data - size_conv + 1 , A_ ):
UpperCamelCase : Union[str, Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(A_ )
        # calculate the feature map of every single kernel, and save them as a list of matrices
UpperCamelCase : int = []
UpperCamelCase : Optional[Any] = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(A_ ):
UpperCamelCase : str = []
for i_focus in range(len(A_ ) ):
UpperCamelCase : List[Any] = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(A_ ) )
UpperCamelCase : Optional[int] = np.asmatrix(A_ ).reshape(
A_ , A_ )
data_featuremap.append(A_ )
        # expanding the data slices to one dimension
UpperCamelCase : List[Any] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(A_ ) )
UpperCamelCase : Tuple = np.asarray(A_ )
return focus_list, data_featuremap
def __UpperCamelCase( self , A_ , A_ , A_="average_pool" ):
'''simple docstring'''
UpperCamelCase : Any = len(featuremaps[0] )
UpperCamelCase : str = int(size_map / size_pooling )
UpperCamelCase : Optional[int] = []
for i_map in range(len(A_ ) ):
UpperCamelCase : Tuple = featuremaps[i_map]
UpperCamelCase : Any = []
for i_focus in range(0 , A_ , A_ ):
for j_focus in range(0 , A_ , A_ ):
UpperCamelCase : int = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(A_ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(A_ ) )
UpperCamelCase : Optional[Any] = np.asmatrix(A_ ).reshape(A_ , A_ )
featuremap_pooled.append(A_ )
return featuremap_pooled
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
UpperCamelCase : List[Any] = []
for i in range(len(A_ ) ):
UpperCamelCase : List[Any] = np.shape(data[i] )
UpperCamelCase : str = data[i].reshape(1 , shapes[0] * shapes[1] )
UpperCamelCase : Optional[int] = data_listed.getA().tolist()[0]
data_expanded.extend(A_ )
UpperCamelCase : Any = np.asarray(A_ )
return data_expanded
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
UpperCamelCase : List[Any] = np.asarray(A_ )
UpperCamelCase : List[Any] = np.shape(A_ )
UpperCamelCase : Any = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
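        # spread each pooled gradient uniformly back over its pooling window,
        # then multiply by the sigmoid derivative out * (1 - out) of the
        # convolution output to get the gradient w.r.t. the feature maps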
UpperCamelCase : int = []
UpperCamelCase : Optional[int] = 0
for i_map in range(A_ ):
UpperCamelCase : int = np.ones((size_map, size_map) )
for i in range(0 , A_ , A_ ):
for j in range(0 , A_ , A_ ):
UpperCamelCase : str = pd_pool[
i_pool
]
UpperCamelCase : str = i_pool + 1
UpperCamelCase : str = np.multiply(
A_ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(A_ )
return pd_all
def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_=bool ):
'''simple docstring'''
print("----------------------Start Training-------------------------" )
print((" - - Shape: Train_Data ", np.shape(A_ )) )
print((" - - Shape: Teach_Data ", np.shape(A_ )) )
UpperCamelCase : List[str] = 0
UpperCamelCase : Union[str, Any] = []
UpperCamelCase : int = 1_0000
while rp < n_repeat and mse >= error_accuracy:
UpperCamelCase : Tuple = 0
print(F"""-------------Learning Time {rp}--------------""" )
for p in range(len(A_ ) ):
# print('------------Learning Image: %d--------------'%p)
UpperCamelCase : Any = np.asmatrix(datas_train[p] )
UpperCamelCase : List[str] = np.asarray(datas_teach[p] )
UpperCamelCase , UpperCamelCase : Dict = self.convolute(
A_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
UpperCamelCase : Tuple = self.pooling(A_ , self.size_poolinga )
UpperCamelCase : int = np.shape(A_ )
UpperCamelCase : List[str] = self._expand(A_ )
UpperCamelCase : Optional[int] = data_bp_input
UpperCamelCase : str = np.dot(A_ , self.vji.T ) - self.thre_bpa
UpperCamelCase : Optional[int] = self.sig(A_ )
UpperCamelCase : List[Any] = np.dot(A_ , self.wkj.T ) - self.thre_bpa
UpperCamelCase : Dict = self.sig(A_ )
                # --------------Model Learning ------------------------
# calculate error and gradient---------------
UpperCamelCase : List[Any] = np.multiply(
(data_teach - bp_outa) , np.multiply(A_ , (1 - bp_outa) ) )
UpperCamelCase : str = np.multiply(
np.dot(A_ , self.wkj ) , np.multiply(A_ , (1 - bp_outa) ) )
UpperCamelCase : Any = np.dot(A_ , self.vji )
UpperCamelCase : Dict = pd_i_all / (self.size_poolinga * self.size_poolinga)
UpperCamelCase : List[Any] = pd_conva_pooled.T.getA().tolist()
UpperCamelCase : List[Any] = self._calculate_gradient_from_pool(
A_ , A_ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
UpperCamelCase : List[Any] = self._expand_mat(pd_conva_all[k_conv] )
UpperCamelCase : List[Any] = self.rate_weight * np.dot(A_ , A_ )
UpperCamelCase : str = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
UpperCamelCase : Dict = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
UpperCamelCase : Optional[Any] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
UpperCamelCase : List[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
UpperCamelCase : Optional[Any] = self.thre_bpa - pd_k_all * self.rate_thre
UpperCamelCase : List[str] = self.thre_bpa - pd_j_all * self.rate_thre
                # accumulate the summed absolute error of this single image
UpperCamelCase : List[Any] = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
UpperCamelCase : Any = rp + 1
UpperCamelCase : Union[str, Any] = error_count / patterns
all_mse.append(A_ )
def draw_error():
UpperCamelCase : Tuple = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(A_ , "+-" )
plt.plot(A_ , "r--" )
plt.xlabel("Learning Times" )
plt.ylabel("All_mse" )
plt.grid(A_ , alpha=0.5 )
plt.show()
print("------------------Training Complished---------------------" )
print((" - - Training epoch: ", rp, F""" - - Mse: {mse:.6f}""") )
if draw_e:
draw_error()
return mse
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = []
print("-------------------Start Testing-------------------------" )
print((" - - Shape: Test_Data ", np.shape(A_ )) )
for p in range(len(A_ ) ):
UpperCamelCase : int = np.asmatrix(datas_test[p] )
UpperCamelCase , UpperCamelCase : Any = self.convolute(
A_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
UpperCamelCase : List[str] = self.pooling(A_ , self.size_poolinga )
UpperCamelCase : Dict = self._expand(A_ )
UpperCamelCase : List[Any] = data_bp_input
UpperCamelCase : Any = bp_outa * self.vji.T - self.thre_bpa
UpperCamelCase : List[Any] = self.sig(A_ )
UpperCamelCase : int = bp_outa * self.wkj.T - self.thre_bpa
UpperCamelCase : Optional[int] = self.sig(A_ )
produce_out.extend(bp_outa.getA().tolist() )
UpperCamelCase : List[str] = [list(map(self.do_round , A_ ) ) for each in produce_out]
return np.asarray(A_ )
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = np.asmatrix(A_ )
UpperCamelCase , UpperCamelCase : List[Any] = self.convolute(
A_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
UpperCamelCase : str = self.pooling(A_ , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 52 | 0 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
lowercase : Union[str, Any] = logging.getLogger(__name__)
lowercase : Optional[Any] = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
lowercase : Any = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class A__ :
"""simple docstring"""
__A : Optional[str] = field(
default=__lowercase , metadata={
'''help''': (
                '''The model checkpoint for weights initialization. Don\'t set if you want to train a model from scratch.'''
)
} , )
__A : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(__lowercase )} , )
__A : Optional[str] = field(
default=__lowercase , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
__A : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__A : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
__A : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
__A : bool = field(
default=__lowercase , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
__A : str = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
__A : bool = field(
default=__lowercase , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
def __lowercase ( self) -> Any:
'''simple docstring'''
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'--config_overrides can\'t be used in combination with --config_name or --model_name_or_path')
@dataclass
class A__ :
"""simple docstring"""
__A : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
__A : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
__A : Optional[str] = field(default=__lowercase , metadata={'''help''': '''The input training data file (a text file).'''} )
__A : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
__A : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''} , )
__A : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''} , )
__A : bool = field(
default=__lowercase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
__A : Optional[int] = field(
default=5 , metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
} , )
__A : Optional[int] = field(
default=__lowercase , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated. Default to the max input length of the model.'''
)
} , )
__A : Optional[int] = field(
default=__lowercase , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
__A : float = field(
default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
__A : bool = field(
default=__lowercase , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
def __lowercase ( self) -> int:
'''simple docstring'''
if self.train_file is not None:
a__ : Optional[int] = self.train_file.split('.')[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
a__ : List[Any] = self.validation_file.split('.')[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def A_ ( A__ , A__ ) -> str:
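    # each non-empty line of the ref file is a JSON list of token indices for
    # whole word masking; the upstream script attaches them as a "chinese_ref"
    # column that DataCollatorForWholeWordMask consumes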
with open(A__ , 'r' , encoding='utf-8' ) as f:
a__ : str = [json.loads(A__ ) for line in f.read().splitlines() if (len(A__ ) > 0 and not line.isspace())]
assert len(A__ ) == len(A__ )
a__ : Optional[int] = {c: dataset[c] for c in dataset.column_names}
a__ : Any = refs
return Dataset.from_dict(A__ )
def A_ ( ) -> List[str]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
a__ : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
a__ , a__ , a__ : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
a__ , a__ , a__ : Dict = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
a__ : Optional[int] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
a__ : Optional[int] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + F', distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , A__ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
a__ : Any = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
a__ : List[str] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'train[:{data_args.validation_split_percentage}%]' , )
a__ : Dict = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'train[{data_args.validation_split_percentage}%:]' , )
else:
a__ : Any = {}
if data_args.train_file is not None:
a__ : int = data_args.train_file
if data_args.validation_file is not None:
a__ : Tuple = data_args.validation_file
a__ : Optional[Any] = data_args.train_file.split('.' )[-1]
if extension == "txt":
a__ : Optional[Any] = 'text'
a__ : int = load_dataset(A__ , data_files=A__ )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        'cache_dir': model_args.cache_dir,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name , **config_kwargs )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.' )
        if model_args.config_overrides is not None:
            logger.info(F'Overriding config: {model_args.config_overrides}' )
            config.update_from_string(model_args.config_overrides )
            logger.info(F'New config: {config}' )
    tokenizer_kwargs = {
        'cache_dir': model_args.cache_dir,
        'use_fast': model_args.use_fast_tokenizer,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **tokenizer_kwargs )
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **tokenizer_kwargs )
    else:
        raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported by this script. '
            'You can do it from another script, save it, and load it from here, using --tokenizer_name.' )
    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info('Training new model from scratch' )
        model = AutoModelForMaskedLM.from_config(config )
    model.resize_token_embeddings(len(tokenizer ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets['train'].column_names
    else:
        column_names = datasets['validation'].column_names
    text_column_name = 'text' if 'text' in column_names else column_names[0]
    padding = 'max_length' if data_args.pad_to_max_length else False
    def tokenize_function(examples ):
        # Remove empty lines so the model never trains on blank sequences
        examples['text'] = [line for line in examples['text'] if len(line ) > 0 and not line.isspace()]
        return tokenizer(examples['text'] , padding=padding , truncation=True , max_length=data_args.max_seq_length )
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets['train'] = add_chinese_references(tokenized_datasets['train'] , data_args.train_ref_file )
    if data_args.validation_ref_file is not None:
        tokenized_datasets['validation'] = add_chinese_references(
            tokenized_datasets['validation'] , data_args.validation_ref_file )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False
    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer , mlm_probability=data_args.mlm_probability )
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=tokenized_datasets['train'] if training_args.do_train else None , eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None , tokenizer=tokenizer , data_collator=data_collator , )
# Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        output_train_file = os.path.join(training_args.output_dir , 'train_results.txt' )
        if trainer.is_world_process_zero():
            with open(output_train_file , 'w' ) as writer:
                logger.info('***** Train results *****' )
                for key, value in sorted(train_result.metrics.items() ):
                    logger.info(F' {key} = {value}' )
                    writer.write(F'{key} = {value}\n' )
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output['eval_loss'] )
        results['perplexity'] = perplexity
        output_eval_file = os.path.join(training_args.output_dir , 'eval_results_mlm_wwm.txt' )
        if trainer.is_world_process_zero():
            with open(output_eval_file , 'w' ) as writer:
                logger.info('***** Eval results *****' )
                for key, value in sorted(results.items() ):
                    logger.info(F' {key} = {value}' )
                    writer.write(F'{key} = {value}\n' )
return results
def _mp_fn(index ) -> None:
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
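# --- Added example (ours, not part of the original script) --------------------
# A minimal, hedged sketch of DataCollatorForWholeWordMask in isolation, assuming
# a WordPiece checkpoint such as 'bert-base-uncased'; `_wwm_collator_demo` is our
# illustrative helper name, not a transformers API.
def _wwm_collator_demo():
    from transformers import AutoTokenizer, DataCollatorForWholeWordMask
    tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased' )
    collator = DataCollatorForWholeWordMask(tokenizer=tokenizer , mlm_probability=0.15 )
    features = [tokenizer('whole word masking keeps sub-tokens together' )]
    batch = collator(features )
    # In batch['labels'], every position is -100 except the masked ones, and all
    # WordPiece pieces of a selected word are masked as one unit.
    return batch['input_ids'], batch['labels']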
| 361 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowercase : List[str] = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Tuple = ["""CLIPTokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Optional[int] = ["""CLIPFeatureExtractor"""]
lowercase : Union[str, Any] = ["""CLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Dict = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : List[str] = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Any = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
lowercase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
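# Added note (illustrative, not part of the original module): with the _LazyModule
# pattern above, importing the package stays cheap and each optional backend is
# imported only when one of its symbols is first accessed, e.g.
#
#   from transformers.models.clip import CLIPTokenizer  # no torch/tf/flax import yet
#   from transformers.models.clip import CLIPModel      # first access pulls in torch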
| 225 | 0 |
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image , w , h ):
    """simple docstring"""
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    if isinstance(image[0] , PIL.Image.Image ):
        image = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image]
        image = np.concatenate(image , axis=0 )
        image = np.array(image ).astype(np.float32 ) / 255.0
        image = image.transpose(0 , 3 , 1 , 2 )
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image )
    elif isinstance(image[0] , torch.Tensor ):
        image = torch.cat(image , dim=0 )
    return image
def slerp(t , v0 , v1 , DOT_THRESHOLD=0.9995 ):
    """Spherical linear interpolation between two (torch or numpy) vectors."""
    inputs_are_torch = False
    if not isinstance(v0 , np.ndarray ):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0 ) * np.linalg.norm(v1 )) )
    if np.abs(dot ) > DOT_THRESHOLD:
        # vectors are nearly parallel: fall back to plain linear interpolation
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot )
        sin_theta_0 = np.sin(theta_0 )
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t )
        s0 = np.sin(theta_0 - theta_t ) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1
    if inputs_are_torch:
        v2 = torch.from_numpy(v2 ).to(input_device )
    return v2
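# Added sanity check (ours, not part of the original file): slerp must reproduce
# its endpoints at t=0 and t=1.
def _slerp_endpoint_check():
    v0 = torch.randn(8 )
    v1 = torch.randn(8 )
    assert torch.allclose(slerp(0.0 , v0 , v1 ) , v0 , atol=1e-5 )
    assert torch.allclose(slerp(1.0 , v0 , v1 ) , v1 , atol=1e-5 )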
def spherical_dist_loss(x , y ):
    """simple docstring"""
    x = F.normalize(x , dim=-1 )
    y = F.normalize(y , dim=-1 )
    return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
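# Added derivation note (ours): for unit vectors |x - y| = 2*sin(theta/2), so the
# chain norm/2 -> sin(theta/2), arcsin -> theta/2, pow(2).mul(2) -> theta**2 / 2,
# i.e. half the squared geodesic angle between the two embeddings.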
def set_requires_grad(model , value ):
    """simple docstring"""
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline ):
    """simple docstring"""
    def __init__( self , vae: AutoencoderKL , text_encoder: CLIPTextModel , clip_model: CLIPModel , tokenizer: CLIPTokenizer , unet: UNet2DConditionModel , scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , feature_extractor: CLIPFeatureExtractor , coca_model=None , coca_tokenizer=None , coca_transform=None , ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(
            vae=vae , text_encoder=text_encoder , clip_model=clip_model , tokenizer=tokenizer , unet=unet , scheduler=scheduler , feature_extractor=feature_extractor , coca_model=coca_model , coca_tokenizer=coca_tokenizer , coca_transform=coca_transform , )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size , int )
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
        set_requires_grad(self.text_encoder , False )
        set_requires_grad(self.clip_model , False )
    def enable_attention_slicing(self , slice_size: Optional[Union[str, int]] = "auto" ):
        '''simple docstring'''
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing(self ):
        '''simple docstring'''
        self.enable_attention_slicing(None )
    def freeze_vae(self ):
        '''simple docstring'''
        set_requires_grad(self.vae , False )
    def unfreeze_vae(self ):
        '''simple docstring'''
        set_requires_grad(self.vae , True )
    def freeze_unet(self ):
        '''simple docstring'''
        set_requires_grad(self.unet , False )
    def unfreeze_unet(self ):
        '''simple docstring'''
        set_requires_grad(self.unet , True )
    def get_timesteps(self , num_inference_steps , strength , device ):
        '''simple docstring'''
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength ) , num_inference_steps )
        t_start = max(num_inference_steps - init_timestep , 0 )
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents(self , image , timestep , batch_size , dtype , device , generator=None ):
        '''simple docstring'''
        if not isinstance(image , torch.Tensor ):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image )}" )
        image = image.to(device=device , dtype=dtype )
        if isinstance(generator , list ):
            init_latents = [
                self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(batch_size )
            ]
            init_latents = torch.cat(init_latents , dim=0 )
        else:
            init_latents = self.vae.encode(image ).latent_dist.sample(generator )
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size , dim=0 )
        noise = randn_tensor(init_latents.shape , generator=generator , device=device , dtype=dtype )
        # add noise to the initial latents at the chosen timestep
        init_latents = self.scheduler.add_noise(init_latents , noise , timestep )
        latents = init_latents
        return latents
    def get_image_description(self , image ):
        '''simple docstring'''
        transformed_image = self.coca_transform(image ).unsqueeze(0 )
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
        return generated.split("<end_of_text>" )[0].replace("<start_of_text>" , "" ).rstrip(" .," )
    def get_clip_image_embeddings(self , image , batch_size ):
        '''simple docstring'''
        clip_image_input = self.feature_extractor.preprocess(image )
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features )
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=True )
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size , dim=0 )
        return image_embeddings_clip
@torch.enable_grad()
    def cond_fn( self , latents , timestep , index , text_embeddings , noise_pred_original , original_image_embeddings_clip , clip_guidance_scale , ):
        '''simple docstring'''
        latents = latents.detach().requires_grad_()
        latent_model_input = self.scheduler.scale_model_input(latents , timestep )
        # predict the noise residual
        noise_pred = self.unet(latent_model_input , timestep , encoder_hidden_states=text_embeddings ).sample
        if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
            fac = torch.sqrt(beta_prod_t )
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler , LMSDiscreteScheduler ):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler )} not supported" )
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = transforms.Resize(self.feature_extractor_size )(image )
        image = self.normalize(image ).to(latents.dtype )
        image_embeddings_clip = self.clip_model.get_image_features(image )
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=True )
        loss = spherical_dist_loss(image_embeddings_clip , original_image_embeddings_clip ).mean() * clip_guidance_scale
        grads = -torch.autograd.grad(loss , latents )[0]
        if isinstance(self.scheduler , LMSDiscreteScheduler ):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t ) * grads
        return noise_pred, latents
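    # Added note (ours): cond_fn above decodes an x0 estimate from the current
    # latents, scores it against the blended CLIP image embedding with
    # spherical_dist_loss, and nudges the latents (or the noise prediction)
    # along the negative gradient of that loss, scaled by clip_guidance_scale.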
@torch.no_grad()
    def __call__( self , style_image: Union[torch.FloatTensor, PIL.Image.Image] , content_image: Union[torch.FloatTensor, PIL.Image.Image] , style_prompt: Optional[str] = None , content_prompt: Optional[str] = None , height: Optional[int] = 512 , width: Optional[int] = 512 , noise_strength: float = 0.6 , num_inference_steps: Optional[int] = 50 , guidance_scale: Optional[float] = 7.5 , batch_size: Optional[int] = 1 , eta: float = 0.0 , clip_guidance_scale: Optional[float] = 100 , generator: Optional[torch.Generator] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , slerp_latent_style_strength: float = 0.8 , slerp_prompt_style_strength: float = 0.1 , slerp_clip_image_style_strength: float = 0.1 , ):
'''simple docstring'''
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator )} generators." )
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
        if isinstance(generator , torch.Generator ) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)
        coca_is_none = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ", ".join(coca_is_none )
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none ):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline." )
            content_prompt = self.get_image_description(content_image )
        if style_prompt is None:
            if len(coca_is_none ):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline." )
            style_prompt = self.get_image_description(style_image )
# get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=True , return_tensors="pt" , )
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
        style_text_input = self.tokenizer(
            style_prompt , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=True , return_tensors="pt" , )
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
        text_embeddings = slerp(slerp_prompt_style_strength , content_text_embeddings , style_text_embeddings )
        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size , dim=0 )
# set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1
        self.scheduler.set_timesteps(num_inference_steps , **extra_set_kwargs )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device )
        timesteps , num_inference_steps = self.get_timesteps(num_inference_steps , noise_strength , self.device )
        latent_timestep = timesteps[:1].repeat(batch_size )
        # Preprocess image
        content_image = preprocess(content_image , width , height )
        content_latents = self.prepare_latents(
            content_image , latent_timestep , batch_size , text_embeddings.dtype , self.device , generator )
        style_image = preprocess(style_image , width , height )
        style_latents = self.prepare_latents(
            style_image , latent_timestep , batch_size , text_embeddings.dtype , self.device , generator )
        latents = slerp(slerp_latent_style_strength , content_latents , style_latents )
        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image , batch_size )
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image , batch_size )
            clip_image_embeddings = slerp(
                slerp_clip_image_style_strength , content_clip_image_embedding , style_clip_image_embedding )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
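        # Added numeric illustration (ours): with the default guidance_scale of 7.5
        # the combined prediction in the denoising loop is
        #   uncond + 7.5 * (text - uncond),
        # pushing the update 7.5x further along the text-conditioned direction.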
        do_classifier_free_guidance = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""] , padding="max_length" , max_length=max_length , return_tensors="pt" )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size , dim=0 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape , generator=generator , device="cpu" , dtype=latents_dtype ).to(
                    self.device )
            else:
                latents = torch.randn(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
            latents = latents.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta
        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        with self.progress_bar(total=num_inference_steps ):
            for i, t in enumerate(timesteps ):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input , t )
                # predict the noise residual
                noise_pred = self.unet(latent_model_input , t , encoder_hidden_states=text_embeddings ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
                    noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred , latents = self.cond_fn(
                        latents , t , i , text_embeddings_for_guidance , noise_pred , clip_image_embeddings , clip_guidance_scale , )
# compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred , t , latents , **extra_step_kwargs ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
if not return_dict:
return (image, None)
        return StableDiffusionPipelineOutput(images=image , nsfw_content_detected=None )
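# Added summary (ours, not part of the pipeline file): the __call__ flow above is
#   1. CoCa captions any missing content/style prompt,
#   2. text embeddings, VAE latents and CLIP image embeddings of the two input
#      images are each blended with slerp,
#   3. denoising runs with classifier-free guidance plus an optional CLIP-guided
#      gradient step (cond_fn) toward the blended image embedding.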
| 323 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    """simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFRoFormerModel(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_lm_head(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs )["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
    def create_and_check_for_masked_lm(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFRoFormerForMaskedLM(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_multiple_choice(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_for_token_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFRoFormerForQuestionAnswering(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFRoFormerModel,
'''fill-mask''': TFRoFormerForMaskedLM,
'''question-answering''': TFRoFormerForQuestionAnswering,
'''text-classification''': TFRoFormerForSequenceClassification,
'''text-generation''': TFRoFormerForCausalLM,
'''token-classification''': TFRoFormerForTokenClassification,
'''zero-shot''': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        '''simple docstring'''
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
        return False
    def setUp(self ):
        '''simple docstring'''
        self.model_tester = TFRoFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RoFormerConfig , hidden_size=37 )
    def test_config(self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_causal_lm(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs )
    def test_for_multiple_choice(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification(self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained(self ):
        '''simple docstring'''
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base" )
        self.assertIsNotNone(model )
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase ):
    """simple docstring"""
@slow
    def test_inference_masked_lm(self ):
        '''simple docstring'''
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        # TODO Replace vocab size
        vocab_size = 50000
        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape , expected_shape )
        print(output[:, :3, :3] )
        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase ):
    """simple docstring"""
    tolerance = 1E-4
    def test_basic(self ):
        '''simple docstring'''
        input_ids = tf.constant([[4, 10]] )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
        emb = emb1(input_ids.shape )
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
        tf.debugging.assert_near(emb , desired_weights , atol=self.tolerance )
    def test_positional_emb_weights_against_roformer(self ):
        '''simple docstring'''
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ] )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
        emb1([2, 16, 512] )
        weights = emb1.weight[:3, :5]
        tf.debugging.assert_near(weights , desired_weights , atol=self.tolerance )
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase ):
    """simple docstring"""
    tolerance = 1E-4
    def test_apply_rotary_position_embeddings(self ):
        '''simple docstring'''
        # (batch, heads, seq_len, head_dim) = (2, 12, 16, 64)
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
        sinusoidal_pos = embed_positions([2, 16, 768] )[None, None, :, :]
        query_layer , key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos , query_layer , key_layer )
        expected_query = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
        expected_key = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8] , expected_query , atol=self.tolerance )
        tf.debugging.assert_near(key_layer[0, 0, :6, :8] , expected_key , atol=self.tolerance )
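# Added note (ours, not part of the test file): the rotary trick checked above
# rotates each (even, odd) feature pair of q and k by a position-dependent angle,
#   q'_{2i}   = q_{2i} * cos(m * theta_i) - q_{2i+1} * sin(m * theta_i)
#   q'_{2i+1} = q_{2i+1} * cos(m * theta_i) + q_{2i} * sin(m * theta_i)
# so the attention score q'.k' depends only on the relative offset m - n.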
| 79 | 0 |
'''simple docstring'''
import unittest
from transformers import DonutProcessor
DONUT_PRETRAINED_MODEL_NAME = 'naver-clova-ix/donut-base'
class DonutProcessorTest(unittest.TestCase ):
    def setUp(self ) -> None:
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME )
    def test_token2json(self ) -> None:
        expected_json = {
"""name""": """John Doe""",
"""age""": """99""",
"""city""": """Atlanta""",
"""state""": """GA""",
"""zip""": """30301""",
"""phone""": """123-4567""",
"""nicknames""": [{"""nickname""": """Johnny"""}, {"""nickname""": """JD"""}],
}
        sequence = (
"""<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"""
"""<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"""
"""<s_nicknames><s_nickname>Johnny</s_nickname>"""
"""<sep/><s_nickname>JD</s_nickname></s_nicknames>"""
)
        actual_json = self.processor.token2json(sequence )
        self.assertDictEqual(actual_json , expected_json )
| 358 |
'''simple docstring'''
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = '%20'.join(argv[1:]) if len(argv) > 1 else quote(str(input('Search: ')))
    print('Googling.....')
    url = f'https://www.google.com/search?q={query}&num=100'
    res = requests.get(
url,
headers={'User-Agent': str(UserAgent().random)},
)
try:
        link = (
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'yuRUbf'})
.find('a')
.get('href')
)
except AttributeError:
        link = parse_qs(
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'kCrYT'})
.find('a')
.get('href')
)['url'][0]
webbrowser.open(link)
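# Added caveat (ours): the 'yuRUbf' and 'kCrYT' class names above are
# Google-internal markup that changes without notice, so this scraper is
# inherently fragile; the except-branch handles the lightweight HTML variant
# whose result anchors carry redirect-style hrefs instead.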
| 243 | 0 |
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path , metadata_path , entity_vocab_path , pytorch_dump_folder_path , model_size ):
'''simple docstring'''
    # Load configuration defined in the metadata file
    with open(metadata_path ) as metadata_file:
        metadata = json.load(metadata_file )
    config = LukeConfig(use_entity_aware_attention=True , **metadata["model_config"] )
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path , map_location="cpu" )
    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path )
    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>" , lstrip=False , rstrip=False )
    entity_token_2 = AddedToken("<ent2>" , lstrip=False , rstrip=False )
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]} )
    config.vocab_size += 2
    print(f"Saving tokenizer to {pytorch_dump_folder_path}" )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    with open(os.path.join(pytorch_dump_folder_path , LukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
        json.dump(entity_vocab , f )
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path )
    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"] )[0]].unsqueeze(0 )
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"] )[0]].unsqueeze(0 )
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]
    model = LukeModel(config=config ).eval()
    missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
    if not (len(missing_keys ) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys )}. Expected only missing embeddings.position_ids" )
if not (all(key.startswith("""entity_predictions""" ) or key.startswith("""lm_head""" ) for key in unexpected_keys )):
raise ValueError(
"""Unexpected keys"""
f""" {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}""" )
# Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path , task="entity_classification" )
    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text , entity_spans=[span] , add_prefix_space=True , return_tensors="pt" )
    outputs = model(**encoding )
# Verify word hidden states
if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024) )
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
    else:  # base
        expected_shape = torch.Size((1, 42, 768) )
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024) )
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]] )
    else:  # base
        expected_shape = torch.Size((1, 1, 768) )
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}" )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ):
        raise ValueError
raise ValueError
# Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path ) )
    model.save_pretrained(pytorch_dump_folder_path )
def load_entity_vocab(entity_vocab_path ):
    '''simple docstring'''
    entity_vocab = {}
    with open(entity_vocab_path , "r" , encoding="utf-8" ) as f:
        for index, line in enumerate(f ):
            title , _ = line.rstrip().split("\t" )
            entity_vocab[title] = index
    return entity_vocab
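# Added note (ours): each line of the entity vocab TSV is "<entity title>\t<field>"
# (the second field is ignored by the loader above), e.g. "[MASK]\t0"; titles are
# mapped to their zero-based line index, which is how "[MASK]" and "[MASK2]" are
# looked up during conversion.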
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 343 |
def valid_coloring(neighbours: list[int] , colored_vertices: list[int] , color: int ) -> bool:
    '''simple docstring'''
    # Does any neighbour already carry this color?
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours ) )
def util_color(graph: list[list[int]] , max_colors: int , colored_vertices: list[int] , index: int ) -> bool:
    '''simple docstring'''
    # Base Case
    if index == len(graph ):
        return True
    # Recursive Step
    for i in range(max_colors ):
        if valid_coloring(graph[index] , colored_vertices , i ):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph , max_colors , colored_vertices , index + 1 ):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color(graph: list[list[int]] , max_colors: int ) -> list[int]:
    '''simple docstring'''
    colored_vertices = [-1] * len(graph )
    if util_color(graph , max_colors , colored_vertices , 0 ):
        return colored_vertices
    return []
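# Added usage sketch (ours): 3-coloring a triangle with a pendant vertex,
# given as an adjacency matrix.
if __name__ == "__main__":
    demo_graph = [
        [0, 1, 1, 0],
        [1, 0, 1, 0],
        [1, 1, 0, 1],
        [0, 0, 1, 0],
    ]
    print(color(demo_graph, 3))  # e.g. [0, 1, 2, 0]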
| 343 | 1 |
"""simple docstring"""
def is_contains_unique_chars(input_str: str ) -> bool:
    """
    Return True if every character of ``input_str`` occurs at most once,
    using one big integer as a bitmap over Unicode code points.
    """
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch )
        ch_bit_index_on = pow(2 , ch_unicode )
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
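# Added examples (ours):
#   is_contains_unique_chars("abcde")  -> True
#   is_contains_unique_chars("aabcde") -> False  (duplicate 'a')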
| 272 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
_lowercase : Tuple = r"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(_lowercase )
class RagConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = 'rag'
    is_composition = True
    def __init__( self , vocab_size=None , is_encoder_decoder=True , prefix=None , bos_token_id=None , pad_token_id=None , eos_token_id=None , decoder_start_token_id=None , title_sep=' / ' , doc_sep=' // ' , n_docs=5 , max_combined_length=300 , retrieval_vector_size=768 , retrieval_batch_size=8 , dataset='wiki_dpr' , dataset_split='train' , index_name='compressed' , index_path=None , passages_path=None , use_dummy_dataset=False , reduce_loss=False , label_smoothing=0.0 , do_deduplication=True , exclude_bos_score=False , do_marginalize=False , output_retrieved=False , use_cache=True , forced_eos_token_id=None , **kwargs , ):
        super().__init__(
            bos_token_id=bos_token_id , pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , is_encoder_decoder=is_encoder_decoder , prefix=prefix , vocab_size=vocab_size , **kwargs , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
lowerCamelCase__ : Tuple =kwargs.pop('''question_encoder''' )
lowerCamelCase__ : int =question_encoder_config.pop('''model_type''' )
lowerCamelCase__ : Dict =kwargs.pop('''generator''' )
lowerCamelCase__ : Tuple =decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
lowerCamelCase__ : Tuple =AutoConfig.for_model(lowerCamelCase, **lowerCamelCase )
lowerCamelCase__ : Dict =AutoConfig.for_model(lowerCamelCase, **lowerCamelCase )
lowerCamelCase__ : Any =reduce_loss
lowerCamelCase__ : Tuple =label_smoothing
lowerCamelCase__ : List[str] =exclude_bos_score
lowerCamelCase__ : Dict =do_marginalize
lowerCamelCase__ : Union[str, Any] =title_sep
lowerCamelCase__ : Dict =doc_sep
lowerCamelCase__ : List[Any] =n_docs
lowerCamelCase__ : List[str] =max_combined_length
lowerCamelCase__ : List[Any] =dataset
lowerCamelCase__ : int =dataset_split
lowerCamelCase__ : List[Any] =index_name
lowerCamelCase__ : int =retrieval_vector_size
lowerCamelCase__ : Dict =retrieval_batch_size
lowerCamelCase__ : str =passages_path
lowerCamelCase__ : Any =index_path
lowerCamelCase__ : List[Any] =use_dummy_dataset
lowerCamelCase__ : Optional[int] =output_retrieved
lowerCamelCase__ : List[str] =do_deduplication
lowerCamelCase__ : Tuple =use_cache
if self.forced_eos_token_id is None:
lowerCamelCase__ : int =getattr(self.generator, '''forced_eos_token_id''', lowerCamelCase )
@classmethod
def snake_case ( cls : List[str], lowerCamelCase : PretrainedConfig, lowerCamelCase : PretrainedConfig, **lowerCamelCase : int )-> PretrainedConfig:
return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **lowerCamelCase )
def snake_case ( self : Optional[int] )-> int:
lowerCamelCase__ : Union[str, Any] =copy.deepcopy(self.__dict__ )
lowerCamelCase__ : Optional[int] =self.question_encoder.to_dict()
lowerCamelCase__ : Tuple =self.generator.to_dict()
lowerCamelCase__ : Optional[Any] =self.__class__.model_type
return output
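

# Usage sketch (an addition, not part of the original file; the checkpoint names
# below are illustrative): composing a RagConfig from a question-encoder config
# and a generator config via the classmethod defined above.
#
#   from transformers import AutoConfig
#
#   question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   generator_config = AutoConfig.from_pretrained("facebook/bart-large")
#   rag_config = RagConfig.from_question_encoder_generator_configs(
#       question_encoder_config, generator_config, n_docs=5, max_combined_length=300
#   )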
"""simple docstring"""
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def lowerCamelCase__ ( __snake_case, __snake_case ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm1.weight''', F'''encoder.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.norm1.bias''', F'''encoder.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.weight''', F'''encoder.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.bias''', F'''encoder.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm2.weight''', F'''encoder.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.norm2.bias''', F'''encoder.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.weight''', F'''encoder.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.bias''', F'''encoder.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc2.weight''', F'''encoder.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.mlp.fc2.bias''', F'''encoder.encoder.layer.{i}.output.dense.bias''') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
] )
return rename_keys
def lowerCamelCase__ ( __snake_case, __snake_case ) -> Optional[Any]:
"""simple docstring"""
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
_UpperCamelCase = state_dict.pop(F'''encoder.deit.blocks.{i}.attn.qkv.weight''' )
_UpperCamelCase = in_proj_weight[
: encoder_config.hidden_size, :
]
_UpperCamelCase = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
_UpperCamelCase = in_proj_weight[
-encoder_config.hidden_size :, :
]
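

# Toy illustration (added for clarity, not in the original script) of the fused
# QKV split performed above: a (3*h, h) matrix stacks the query, key and value
# projections row-wise, so slicing by hidden size recovers the three matrices.
def _demo_qkv_split(hidden_size: int = 4) -> None:
    qkv = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
    query = qkv[:hidden_size, :]
    key = qkv[hidden_size : hidden_size * 2, :]
    value = qkv[-hidden_size:, :]
    assert torch.equal(torch.cat([query, key, value], dim=0), qkv)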
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def lowerCamelCase__ ( __snake_case ) -> Optional[Any]:
"""simple docstring"""
if "handwritten" in checkpoint_url:
_UpperCamelCase = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
_UpperCamelCase = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'''
_UpperCamelCase = Image.open(requests.get(__snake_case, stream=__snake_case ).raw ).convert('''RGB''' )
return im
@torch.no_grad()
def lowerCamelCase__ ( __snake_case, __snake_case ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = ViTConfig(image_size=3_84, qkv_bias=__snake_case )
_UpperCamelCase = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
_UpperCamelCase = 7_68
elif "large" in checkpoint_url:
# use ViT-large encoder
_UpperCamelCase = 10_24
_UpperCamelCase = 40_96
_UpperCamelCase = 24
_UpperCamelCase = 16
_UpperCamelCase = 10_24
else:
raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''' )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
_UpperCamelCase = False
_UpperCamelCase = '''relu'''
_UpperCamelCase = 10_24
_UpperCamelCase = True
_UpperCamelCase = False
_UpperCamelCase = False
# load HuggingFace model
_UpperCamelCase = ViTModel(__snake_case, add_pooling_layer=__snake_case )
_UpperCamelCase = TrOCRForCausalLM(__snake_case )
_UpperCamelCase = VisionEncoderDecoderModel(encoder=__snake_case, decoder=__snake_case )
model.eval()
# load state_dict of original model, rename some keys
_UpperCamelCase = torch.hub.load_state_dict_from_url(__snake_case, map_location='''cpu''', check_hash=__snake_case )['''model''']
_UpperCamelCase = create_rename_keys(__snake_case, __snake_case )
for src, dest in rename_keys:
rename_key(__snake_case, __snake_case, __snake_case )
read_in_q_k_v(__snake_case, __snake_case )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
_UpperCamelCase = state_dict.pop(__snake_case )
if key.startswith('''decoder''' ) and "output_projection" not in key:
_UpperCamelCase = val
else:
_UpperCamelCase = val
# load state dict
model.load_state_dict(__snake_case )
# Check outputs on an image
_UpperCamelCase = ViTImageProcessor(size=encoder_config.image_size )
_UpperCamelCase = RobertaTokenizer.from_pretrained('''roberta-large''' )
_UpperCamelCase = TrOCRProcessor(__snake_case, __snake_case )
_UpperCamelCase = processor(images=prepare_img(__snake_case ), return_tensors='''pt''' ).pixel_values
# verify logits
_UpperCamelCase = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
_UpperCamelCase = model(pixel_values=__snake_case, decoder_input_ids=__snake_case )
_UpperCamelCase = outputs.logits
_UpperCamelCase = torch.Size([1, 1, 5_02_65] )
if "trocr-base-handwritten" in checkpoint_url:
_UpperCamelCase = torch.tensor(
[-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] )
elif "trocr-large-handwritten" in checkpoint_url:
_UpperCamelCase = torch.tensor(
[-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] )
elif "trocr-base-printed" in checkpoint_url:
_UpperCamelCase = torch.tensor(
[-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] )
elif "trocr-large-printed" in checkpoint_url:
_UpperCamelCase = torch.tensor(
[-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10], __snake_case, atol=1e-3 ), "First elements of logits not as expected"
Path(__snake_case ).mkdir(exist_ok=__snake_case )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__snake_case )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(__snake_case )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
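

# Example invocation (paths and the script filename are illustrative):
#
#   python convert_trocr_checkpoint.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten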
"""simple docstring"""
from sklearn.metrics import matthews_corrcoef
import datasets
_a = """
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
"""
_a = """
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results['matthews_correlation'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results['matthews_correlation'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results['matthews_correlation'], 2))
-0.25
"""
_a = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
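

# Quick self-check sketch (an addition, not part of the original metric script):
# the metric above is a thin wrapper around scikit-learn, so the same number
# falls out of `matthews_corrcoef` directly.
if __name__ == "__main__":
    refs, preds = [1, 3, 2, 0, 3, 2], [1, 2, 2, 0, 3, 3]
    print(round(matthews_corrcoef(refs, preds), 2))  # 0.54, matching the docstring example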
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
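

# Usage sketch (an addition; assumes `train.pkl` is a pickled pandas DataFrame):
# this builder is what the packaged "pandas" loader dispatches to.
#
#   from datasets import load_dataset
#
#   ds = load_dataset("pandas", data_files={"train": "train.pkl"})
#   print(ds["train"].features)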
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    # special case for the ForPreTraining model: it additionally expects a `next_sentence_label`
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)

        return inputs_dict

    class TFMobileBertModelTester(object):
        def __init__(
            self,
            parent,
            batch_size=13,
            seq_length=7,
            is_training=True,
            use_input_mask=True,
            use_token_type_ids=True,
            use_labels=True,
            vocab_size=99,
            hidden_size=32,
            embedding_size=32,
            num_hidden_layers=2,
            num_attention_heads=4,
            intermediate_size=37,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=16,
            type_sequence_label_size=2,
            initializer_range=0.02,
            num_labels=3,
            num_choices=4,
            scope=None,
        ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size

        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = MobileBertConfig(
                vocab_size=self.vocab_size,
                hidden_size=self.hidden_size,
                num_hidden_layers=self.num_hidden_layers,
                num_attention_heads=self.num_attention_heads,
                intermediate_size=self.intermediate_size,
                hidden_act=self.hidden_act,
                hidden_dropout_prob=self.hidden_dropout_prob,
                attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                max_position_embeddings=self.max_position_embeddings,
                type_vocab_size=self.type_vocab_size,
                initializer_range=self.initializer_range,
                embedding_size=self.embedding_size,
            )

            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

        def create_and_check_mobilebert_model(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)

            inputs = [input_ids, input_mask]
            result = model(inputs)

            result = model(input_ids)

            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
            )
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
            )
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict

    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
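

# Standalone inference sketch (an addition, not part of the test suite; assumes
# access to the Hub checkpoint used above). MobileBERT's hidden size is 512.
#
#   from transformers import AutoTokenizer, TFMobileBertModel
#
#   tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
#   model = TFMobileBertModel.from_pretrained("google/mobilebert-uncased")
#   inputs = tokenizer("Hello world", return_tensors="tf")
#   outputs = model(inputs)
#   print(outputs.last_hidden_state.shape)  # (1, sequence_length, 512)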
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
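

# Reading the CSV back (a sketch added for illustration):
#
#   import csv
#
#   with open("IMDb_Top_250_Movies.csv", newline="") as f:
#       top = {row["Movie title"]: float(row["IMDb rating"]) for row in csv.DictReader(f)}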
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
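

# Standalone usage sketch (an addition; mirrors the tester defaults above):
# TrOCRForCausalLM can be exercised on its own with a small randomly
# initialized config, without the vision encoder.
#
#   import torch
#   from transformers import TrOCRConfig
#   from transformers.models.trocr.modeling_trocr import TrOCRForCausalLM
#
#   config = TrOCRConfig(vocab_size=99, d_model=16, decoder_layers=4, decoder_attention_heads=4, decoder_ffn_dim=32)
#   model = TrOCRForCausalLM(config).eval()
#   input_ids = torch.randint(1, config.vocab_size, (1, 7))
#   with torch.no_grad():
#       logits = model(input_ids).logits  # shape (1, 7, vocab_size)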
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(
        self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetEmbeddings(nn.Module):
    """
    ResNet Embeddings (stem) composed of a single aggressive convolution.
    """

    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding


class ResNetShortCut(nn.Module):
    """
    ResNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
    downsample the input using `stride=2`.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class ResNetBasicLayer(nn.Module):
    """
    A classic ResNet residual layer composed of two `3x3` convolutions.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetBottleNeckLayer(nn.Module):
    """
    A classic ResNet bottleneck layer composed of three convolutions. The first `1x1` convolution reduces the input
    by a factor of `reduction` in order to make the `3x3` convolution faster; the last `1x1` convolution remaps the
    reduced features to `out_channels`.
    """

    def __init__(
        self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4
    ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetStage(nn.Module):
    """
    A ResNet stage composed of stacked layers.
    """

    def __init__(
        self,
        config: ResNetConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 2,
        depth: int = 2,
    ):
        super().__init__()

        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer

        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state


class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )


class ResNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value


RESNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

RESNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)


@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
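

# Inference sketch (an addition; assumes access to the Hub checkpoint named in
# the docstring constants above).
#
#   import numpy as np
#   from PIL import Image
#   from transformers import AutoImageProcessor
#
#   processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#   model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#   image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
#   inputs = processor(images=image, return_tensors="pt")
#   logits = model(**inputs).logits
#   print(model.config.id2label[int(logits.argmax(-1))])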
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def __UpperCamelCase (_SCREAMING_SNAKE_CASE = N ) -> int:
lowercase__ = -sys.maxsize - 1
for i in range(len(_SCREAMING_SNAKE_CASE ) - 12 ):
lowercase__ = 1
for j in range(13 ):
product *= int(n[i + j] )
if product > largest_product:
lowercase__ = product
return largest_product
if __name__ == "__main__":
print(f'''{solution() = }''')
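

# Equivalent formulation (a sketch added for comparison) using math.prod over a
# sliding window; it returns the same value as solution():
#
#   import math
#
#   max(math.prod(int(d) for d in N[i : i + 13]) for i in range(len(N) - 12))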
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    mam_aaa = torch.load(checkpoint_path, map_location="cpu")
    args = mam_aaa["args"]
    state_dict = mam_aaa["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
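

# Example invocation (paths and the script filename are illustrative):
#
#   python convert_s2t_fairseq_to_tfms.py \
#       --fairseq_path ./s2t_transformer_s.pt \
#       --pytorch_dump_folder_path ./s2t-small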
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
    GPT2Config,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
    T5Config,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
    TFGPT2LMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
UpperCAmelCase : Tuple = {
'bart': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'bert': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-base-cased-finetuned-mrpc': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'dpr': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'gpt2': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlnet': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm-roberta': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'transfo-xl': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'openai-gpt': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'roberta': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'layoutlm': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'roberta-large-mnli': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'camembert': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'flaubert': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert-base-distilled-squad': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert-visual-feature-encoder': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'ctrl': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'albert': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
't5': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'electra': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'wav2vec2': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf( model_type , pytorch_checkpoint_path , config_file , tf_dump_path , compare_with_pt_model=False , use_cached_models=True ):
    """simple docstring"""
    if model_type not in MODEL_CLASSES:
        raise ValueError(F'Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.' )
    config_class , model_class , pt_model_class , aws_config_map = MODEL_CLASSES[model_type]
    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file , CONFIG_NAME , force_download=not use_cached_models )
    config = config_class.from_json_file(config_file )
    config.output_hidden_states = True
    config.output_attentions = True
    print(F'Building TensorFlow model from configuration: {config}' )
    tf_model = model_class(config )
    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path , WEIGHTS_NAME , force_download=not use_cached_models )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model , pytorch_checkpoint_path )
    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs , training=False )  # build the network
        state_dict = torch.load(pytorch_checkpoint_path , map_location="""cpu""" )
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None , config=config , state_dict=state_dict )
        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs )
        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf ) )
        print(F'Max absolute difference between models outputs {diff}' )
        assert diff <= 2E-2, F'Error, model absolute difference is >2e-2: {diff}'
    # Save the TensorFlow model
    print(F'Save TensorFlow model to {tf_dump_path}' )
    tf_model.save_weights(tf_dump_path , save_format="""h5""" )
def convert_all_pt_checkpoints_to_tf( args_model_type , tf_dump_path , model_shortcut_names_or_path=None , config_shortcut_names_or_path=None , compare_with_pt_model=False , use_cached_models=False , remove_cached_files=False , only_convert_finetuned_models=False , ):
    """simple docstring"""
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys() )
    else:
        model_types = [args_model_type]
    for j, model_type in enumerate(model_types , start=1 ):
        print("""=""" * 1_00 )
        print(F' Converting model type {j}/{len(model_types )}: {model_type}' )
        print("""=""" * 1_00 )
        if model_type not in MODEL_CLASSES:
            raise ValueError(F'Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.' )
        config_class , model_class , pt_model_class , aws_model_maps , aws_config_map = MODEL_CLASSES[model_type]
        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys() )
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path
        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path , config_shortcut_names_or_path ) , start=1 ):
            print("""-""" * 1_00 )
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(F' Skipping finetuned checkpoint {model_shortcut_name}' )
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(F' Skipping not finetuned checkpoint {model_shortcut_name}' )
                continue
            print(
                F' Converting checkpoint {i}/{len(model_shortcut_names_or_path )}: {model_shortcut_name} - model_type {model_type}' )
            print("""-""" * 1_00 )
            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name , CONFIG_NAME , force_download=not use_cached_models )
            else:
                config_file = config_shortcut_name
            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name , WEIGHTS_NAME , force_download=not use_cached_models )
            else:
                model_file = model_shortcut_name
            if os.path.isfile(model_shortcut_name ):
                model_shortcut_name = """converted_model"""
            convert_pt_checkpoint_to_tf(
                model_type=model_type , pytorch_checkpoint_path=model_file , config_file=config_file , tf_dump_path=os.path.join(tf_dump_path , model_shortcut_name + """-tf_model.h5""" ) , compare_with_pt_model=compare_with_pt_model , )
            if remove_cached_files:
                os.remove(config_file )
                os.remove(model_file )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_dump_path', default=None, type=str, required=True, help='Path to the output Tensorflow dump file.'
)
parser.add_argument(
'--model_type',
default=None,
type=str,
help=(
f"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
'convert all the models from AWS.'
),
)
parser.add_argument(
'--pytorch_checkpoint_path',
default=None,
type=str,
help=(
'Path to the PyTorch checkpoint path or shortcut name to download from AWS. '
'If not given, will download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--config_file',
default=None,
type=str,
help=(
'The config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture. If not given and '
'--pytorch_checkpoint_path is not given or is a shortcut name '
'use the configuration associated to the shortcut name on the AWS'
),
)
parser.add_argument(
'--compare_with_pt_model', action='store_true', help='Compare Tensorflow and PyTorch model predictions.'
)
parser.add_argument(
'--use_cached_models',
action='store_true',
help='Use cached models if possible instead of updating to latest checkpoint versions.',
)
parser.add_argument(
'--remove_cached_files',
action='store_true',
help='Remove pytorch models after conversion (save memory when converting in batches).',
)
parser.add_argument('--only_convert_finetuned_models', action='store_true', help='Only convert finetuned models.')
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
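    # Illustrative invocation only (the script name and dump path are placeholders):
    #   python convert_pytorch_checkpoint_to_tf2.py --model_type bert --tf_dump_path ./tf_dump --compare_with_pt_model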
| 267 | 0 |
__all__ = [
'''Audio''',
'''Array2D''',
'''Array3D''',
'''Array4D''',
'''Array5D''',
'''ClassLabel''',
'''Features''',
'''Sequence''',
'''Value''',
'''Image''',
'''Translation''',
'''TranslationVariableLanguages''',
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 354 |
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION : Optional[Any] = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
_KWARGS_DESCRIPTION : str = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
_CITATION : Union[str, Any] = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
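# For reference, a minimal sketch of the binary-case formula that
# sklearn.metrics.matthews_corrcoef generalises to the multiclass setting; the
# helper name and signature below are illustrative and not part of the metric's API.
def _mcc_from_counts(tp: int, tn: int, fp: int, fn: int) -> float:
    denominator = ((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)) ** 0.5
    return (tp * tn - fp * fn) / denominator if denominator else 0.0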
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'''
] , )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ) ->List[str]:
return {
"matthews_correlation": float(matthews_corrcoef(_lowerCamelCase , _lowerCamelCase , sample_weight=_lowerCamelCase ) ),
}
| 19 | 0 |
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class _A( unittest.TestCase ):
"""simple docstring"""
    def UpperCAmelCase_ ( self ):
        model_name = 'hf-internal-testing/tiny-random-t5'
        tokenizer = AutoTokenizer.from_pretrained(model_name )
        model = AutoModelForSeqaSeqLM.from_pretrained(model_name )
        inp = tokenizer('This is me' , return_tensors='pt' )
        model = model.to_bettertransformer()
        self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
        output = model.generate(**inp )
        model = model.reverse_bettertransformer()
        self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname )
            model_reloaded = AutoModelForSeqaSeqLM.from_pretrained(tmpdirname )
            self.assertFalse(
                any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
            output_reloaded = model_reloaded.generate(**inp )
            self.assertTrue(torch.allclose(output , output_reloaded ) )
def UpperCAmelCase_ ( self ):
__A : List[Any] = 'hf-internal-testing/tiny-random-t5'
__A : List[Any] = AutoModelForSeqaSeqLM.from_pretrained(_A )
__A : Tuple = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(_A ):
model.save_pretrained(_A )
__A : List[Any] = model.reverse_bettertransformer()
model.save_pretrained(_A )
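# Minimal round-trip sketch of the BetterTransformer API exercised by the tests
# above (checkpoint name illustrative):
#   model = AutoModelForSeqaSeqLM.from_pretrained('hf-internal-testing/tiny-random-t5')
#   model = model.to_bettertransformer()        # swap in fused-attention modules
#   model = model.reverse_bettertransformer()   # restore canonical modules before saving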
| 280 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
        self.tmpdirname = tempfile.mkdtemp()
# fmt: off
__A : List[str] = ['', 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
__A : Union[str, Any] = dict(zip(_A , range(len(_A ) ) ) )
__A : Optional[int] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
__A : int = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_A ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_A ) )
__A : List[Any] = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'image_std': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(_A , _A )
def UpperCAmelCase_ ( self , **_A ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='!' , **_A )
def UpperCAmelCase_ ( self , **_A ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='!' , **_A )
def UpperCAmelCase_ ( self , **_A ):
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **_A )
def UpperCAmelCase_ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase_ ( self ):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow , image_processor=image_processor )
        processor_slow.save_pretrained(self.tmpdirname )
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=False )
        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast , image_processor=image_processor )
        processor_fast.save_pretrained(self.tmpdirname )
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _A )
self.assertIsInstance(processor_fast.tokenizer , _A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _A )
self.assertIsInstance(processor_fast.image_processor , _A )
def UpperCAmelCase_ ( self ):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False )
        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=False )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _A )
def UpperCAmelCase_ ( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input , return_tensors='np' )
        input_processor = processor(images=image_input , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase_ ( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = 'lower newer'
        encoded_processor = processor(text=input_str , return_tensors='np' )
        encoded_tok = tokenizer(input_str , return_tensors='np' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def UpperCAmelCase_ ( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
        model_name = 'google/owlvit-base-patch32'
        processor = OwlViTProcessor.from_pretrained(model_name )
        input_texts = ['cat', 'nasa badge']
        inputs = processor(text=input_texts )
        seq_length = 16
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
        model_name = 'google/owlvit-base-patch32'
        processor = OwlViTProcessor.from_pretrained(model_name )
        input_texts = [['cat', 'nasa badge'], ['person']]
        inputs = processor(text=input_texts )
        seq_length = 16
        batch_size = len(input_texts )
        num_max_text_queries = max([len(texts ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
        model_name = 'google/owlvit-base-patch32'
        processor = OwlViTProcessor.from_pretrained(model_name )
        input_texts = ['cat', 'nasa badge']
        inputs = processor(text=input_texts )
        seq_length = 16
        input_ids = inputs['input_ids']
        predicted_ids = [
[49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def UpperCAmelCase_ ( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()
        inputs = processor(images=image_input , query_images=query_input )
self.assertListEqual(list(inputs.keys() ) , ['query_pixel_values', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
| 280 | 1 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowerCAmelCase( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
a : Optional[int] ='''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def _a ( self , _lowerCamelCase=0 ):
        image = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(_lowerCamelCase ) )
        generator = np.random.RandomState(_lowerCamelCase )
UpperCamelCase_: Union[str, Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.7_5,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def _a ( self ):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        expected_slice = np.array([0.6_9_6_4_3, 0.5_8_4_8_4, 0.5_0_3_1_4, 0.5_8_7_6_0, 0.5_5_3_6_8, 0.5_9_6_4_3, 0.5_1_5_2_9, 0.4_1_2_1_7, 0.4_9_0_8_7] )
        assert np.abs(image_slice - expected_slice ).max() < 1e-1
def _a ( self ):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=True )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        expected_slice = np.array([0.6_1_7_3_7, 0.5_4_6_4_2, 0.5_3_1_8_3, 0.5_4_4_6_5, 0.5_2_7_4_2, 0.6_0_5_2_5, 0.4_9_9_6_9, 0.4_0_6_5_5, 0.4_8_1_5_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _a ( self ):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs() )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        expected_slice = np.array([0.5_2_7_6_1, 0.5_9_9_7_7, 0.4_9_0_3_3, 0.4_9_6_1_9, 0.5_4_2_8_2, 0.5_0_3_1_1, 0.4_7_6_0_0, 0.4_0_9_1_8, 0.4_5_2_0_3] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _a ( self ):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        expected_slice = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _a ( self ):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        expected_slice = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _a ( self ):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        expected_slice = np.array([0.6_5_3_3_1, 0.5_8_2_7_7, 0.4_8_2_0_4, 0.5_6_0_5_9, 0.5_3_6_6_5, 0.5_6_2_3_5, 0.5_0_9_6_9, 0.4_0_0_0_9, 0.4_6_5_5_2] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
@property
def _a ( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _a ( self ):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
def _a ( self ):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg' )
        init_image = init_image.resize((7_6_8, 5_1_2) )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = 'A fantasy landscape, trending on artstation'
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=1_0 , generator=generator , output_type='np' , )
        images = output.images
        image_slice = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
        assert images.shape == (1, 5_1_2, 7_6_8, 3)
        expected_slice = np.array([0.4_9_0_9, 0.5_0_5_9, 0.5_3_7_2, 0.4_6_2_3, 0.4_8_7_6, 0.5_0_4_9, 0.4_8_2_0, 0.4_9_5_6, 0.5_0_1_9] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _a ( self ):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg' )
        init_image = init_image.resize((7_6_8, 5_1_2) )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=lms_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = 'A fantasy landscape, trending on artstation'
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=2_0 , generator=generator , output_type='np' , )
        images = output.images
        image_slice = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
        assert images.shape == (1, 5_1_2, 7_6_8, 3)
        expected_slice = np.array([0.8_0_4_3, 0.9_2_6, 0.9_5_8_1, 0.8_1_1_9, 0.8_9_5_4, 0.9_1_3, 0.7_2_0_9, 0.7_4_6_3, 0.7_4_3_1] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 364 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = 'Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine'
def get_user_input () -> Union[str, Any]:
    compute_environment = _ask_options(
        'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser (subparsers=None ) -> Union[str, Any]:
    if subparsers is not None:
        parser = subparsers.add_parser('config' , description=description )
    else:
        parser = argparse.ArgumentParser('Accelerate config command' , description=description )
    parser.add_argument(
        '--config_file' , default=None , help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=config_command )
    return parser
def config_command (args ) -> List[Any]:
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir ):
            os.makedirs(cache_dir )
        config_file = default_yaml_config_file
    if config_file.endswith('.json' ):
        config.to_json_file(config_file )
    else:
        config.to_yaml_file(config_file )
    print(F'''accelerate configuration saved at {config_file}''' )
def main () -> str:
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args )
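# Illustrative CLI usage of the command wired up above (the YAML path is a placeholder):
#   accelerate config --config_file ./my_accelerate_config.yaml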
if __name__ == "__main__":
    main()
| 292 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"""configuration_mmbt""": ["""MMBTConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mmbt"""] = ["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 52 |
def binary_recursive( _lowerCAmelCase ) -> str:
    decimal = int(_lowerCAmelCase )
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal )
    div , mod = divmod(decimal , 2 )
    return binary_recursive(div ) + str(mod )
def A_ ( _lowerCAmelCase ) -> str:
    number = str(_lowerCAmelCase ).strip()
    if not number:
        raise ValueError("No input value was provided" )
    negative = "-" if number.startswith("-" ) else ""
    number = number.lstrip("-" )
    if not number.isnumeric():
        raise ValueError("Input value is not an integer" )
    return F"""{negative}0b{binary_recursive(int(number ) )}"""
if __name__ == "__main__":
from doctest import testmod
testmod()
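    # Example (illustrative): A_("-10") returns "-0b1010"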
| 52 | 1 |
'''simple docstring'''
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = 'sshleifer/bart-tiny-random'
TINY_T5 = 'patrickvonplaten/t5-tiny-random'
@require_torch
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def teacher_config ( self : Union[str, Any] ):
        """simple docstring"""
        return AutoConfig.from_pretrained(TINY_BART )
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE, *_SCREAMING_SNAKE_CASE = create_student_by_copying_alternating_layers(UpperCamelCase__ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def lowerCAmelCase_ ( self : Optional[int] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE, *_SCREAMING_SNAKE_CASE = create_student_by_copying_alternating_layers(UpperCamelCase__ , tempfile.mkdtemp() , e=1 , d=UpperCamelCase__ )
def lowerCAmelCase_ ( self : List[Any] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE, *_SCREAMING_SNAKE_CASE = create_student_by_copying_alternating_layers(UpperCamelCase__ , tempfile.mkdtemp() , e=1 , d=UpperCamelCase__ )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def lowerCAmelCase_ ( self : List[Any] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE, *_SCREAMING_SNAKE_CASE = create_student_by_copying_alternating_layers(UpperCamelCase__ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
with self.assertRaises(UpperCamelCase__ ):
create_student_by_copying_alternating_layers(UpperCamelCase__ , tempfile.mkdtemp() , e=UpperCamelCase__ , d=UpperCamelCase__ )
| 363 |
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = '<<<<<<< This should probably be modified because it mentions: '
HIGHLIGHT_MESSAGE_POST = '=======\n>>>>>>>\n'
TO_HIGHLIGHT = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r'tfds\.core', r'datasets'),
(r'tf\.io\.gfile\.GFile', r'open'),
(r'tf\.([\w\d]+)', r'datasets.Value(\'\1\')'),
(r'tfds\.features\.Text\(\)', r'datasets.Value(\'string\')'),
(r'tfds\.features\.Text\(', r'datasets.Value(\'string\'),'),
(r'features\s*=\s*tfds.features.FeaturesDict\(', r'features=datasets.Features('),
(r'tfds\.features\.FeaturesDict\(', r'dict('),
(r'The TensorFlow Datasets Authors', r'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(r'tfds\.', r'datasets.'),
(r'dl_manager\.manual_dir', r'self.config.data_dir'),
(r'self\.builder_config', r'self.config'),
]
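# Illustrative effect of the substitution patterns above on hypothetical input lines:
#   "tfds.features.Text()"   ->  "datasets.Value('string')"
#   "tf.io.gfile.GFile"      ->  "open"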
def SCREAMING_SNAKE_CASE_ ( __A : Namespace ) -> List[Any]:
return ConvertCommand(args.tfds_path , args.datasets_directory )
class lowercase_ ( A ):
"""simple docstring"""
@staticmethod
def lowerCAmelCase_ ( __lowerCamelCase : ArgumentParser ):
"""simple docstring"""
        train_parser = parser.add_parser(
"convert" , help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset." , )
train_parser.add_argument(
"--tfds_path" , type=__lowerCamelCase , required=__lowerCamelCase , help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert." , )
train_parser.add_argument(
"--datasets_directory" , type=__lowerCamelCase , required=__lowerCamelCase , help="Path to the HuggingFace Datasets folder." )
        train_parser.set_defaults(func=SCREAMING_SNAKE_CASE_ )
    def __init__( self : Dict , tfds_path : str , datasets_directory : str , *args : Tuple ):
        """simple docstring"""
        self._logger = get_logger("datasets-cli/converting" )
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def lowerCAmelCase_ ( self : List[Any] ):
        """simple docstring"""
        if os.path.isdir(self._tfds_path ):
            abs_tfds_path = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            abs_tfds_path = os.path.dirname(self._tfds_path )
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path." )
        abs_datasets_path = os.path.abspath(self._datasets_directory )
        self._logger.info(F"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" )
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path ):
            file_names = os.listdir(abs_tfds_path )
        else:
            file_names = [os.path.basename(self._tfds_path )]
        for f_name in file_names:
            self._logger.info(F"""Looking at file {f_name}""" )
            input_file = os.path.join(abs_tfds_path , f_name )
            output_file = os.path.join(abs_datasets_path , f_name )
            if not os.path.isfile(input_file ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file" )
                continue
            with open(input_file , encoding="utf-8" ) as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger" , "get_logger" )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    needs_manual_update = True
                    to_highlight = list(filter(lambda e : e in out_line , TO_HIGHLIGHT ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_highlight ) + "\n" )
                    out_lines.append(out_line )
                    out_lines.append(HIGHLIGHT_MESSAGE_POST )
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern , replacement , out_line )
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)" , out_line )
                    tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) )
                    out_line = "from . import " + match.group(1 )
                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(F"""Error converting {out_line.strip()}""" )
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line )
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dataset_name = f_name.replace(".py" , "" )
                output_dir = os.path.join(abs_datasets_path , dataset_name )
                output_file = os.path.join(output_dir , f_name )
                os.makedirs(output_dir , exist_ok=True )
                self._logger.info(F"""Adding directory {output_dir}""" )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file )
            if needs_manual_update:
                with_manual_update.append(output_file )
            with open(output_file , "w" , encoding="utf-8" ) as f:
                f.writelines(out_lines )
            self._logger.info(F"""Converted in {output_file}""" )
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file )
                dest_folder = imports_to_builder_map[f_name.replace(".py" , "" )]
                self._logger.info(F"""Moving {dest_folder} to {utils_file}""" )
                shutil.copy(utils_file , dest_folder )
            except KeyError:
                self._logger.error(F"""Cannot find destination folder for {utils_file}. Please copy manually.""" )
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    F"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""" )
| 111 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __lowerCAmelCase ( A , unittest.TestCase ):
UpperCamelCase = ShapEImgaImgPipeline
UpperCamelCase = ['''image''']
UpperCamelCase = ['''image''']
UpperCamelCase = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
UpperCamelCase = False
@property
def _lowerCamelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
return 32
@property
def _lowerCamelCase ( self : int) -> str:
"""simple docstring"""
return 32
@property
def _lowerCamelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
return self.time_input_dim * 4
@property
def _lowerCamelCase ( self : int) -> List[Any]:
"""simple docstring"""
return 8
@property
def _lowerCamelCase ( self : Dict) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
        model = CLIPVisionModel(config)
return model
@property
def _lowerCamelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
        image_processor = CLIPImageProcessor(
crop_size=2_24 , do_center_crop=A , do_normalize=A , do_resize=A , image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , resample=3 , size=2_24 , )
return image_processor
@property
def _lowerCamelCase ( self : List[Any]) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0)
        model_kwargs = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'embedding_proj_norm_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
        model = PriorTransformer(**model_kwargs)
return model
@property
def _lowerCamelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
torch.manual_seed(0)
        model_kwargs = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
        model = ShapERenderer(**model_kwargs)
return model
def _lowerCamelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule='exp' , num_train_timesteps=10_24 , prediction_type='sample' , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
        components = {
'prior': prior,
'image_encoder': image_encoder,
'image_processor': image_processor,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def _lowerCamelCase ( self : int , A : Optional[Any] , A : List[str]=0) -> int:
"""simple docstring"""
        input_image = floats_tensor((1, 3, 64, 64) , rng=random.Random(A)).to(A)
        if str(A).startswith('mps'):
            generator = torch.manual_seed(A)
        else:
            generator = torch.Generator(device=A).manual_seed(A)
        inputs = {
'image': input_image,
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def _lowerCamelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def _lowerCamelCase ( self : Optional[int]) -> str:
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2])
def _lowerCamelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = torch_device == 'cpu'
_UpperCAmelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=A , relax_max_difference=A , )
def _lowerCamelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs , num_images_per_prompt=num_images_per_prompt)[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Dict) -> int:
"""simple docstring"""
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/shap_e/test_shap_e_img2img_out.npy')
        pipe = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img')
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image , generator=generator , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images , expected_image)
| 339 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class __lowerCAmelCase ( A ):
UpperCamelCase = '''open-llama'''
    def __init__( self , vocab_size=10_00_00 , hidden_size=40_96 , intermediate_size=1_10_08 , num_hidden_layers=32 , num_attention_heads=32 , hidden_act="silu" , max_position_embeddings=20_48 , initializer_range=0.0_2 , rms_norm_eps=1E-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , tie_word_embeddings=False , use_memory_efficient_attention=True , hidden_dropout_prob=0.1 , attention_dropout_prob=0.1 , use_stable_embedding=True , shared_input_output_embedding=True , rope_scaling=None , **kwargs , ) -> Dict:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.use_memory_efficient_attention = kwargs.pop(
            'use_memorry_efficient_attention' , use_memory_efficient_attention)
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )
    def _rope_scaling_validation ( self ) -> Union[str, Any]:
        """simple docstring"""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                F"got {self.rope_scaling}")
        rope_scaling_type = self.rope_scaling.get('type' , None)
        rope_scaling_factor = self.rope_scaling.get('factor' , None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float) or rope_scaling_factor <= 1.0:
            raise ValueError(F"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
| 339 | 1 |
'''simple docstring'''
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = '''http://www.mocksite.com/file1.txt'''
CONTENT = '''"text": ["foo", "foo"]'''
HASH = '''6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8'''
class _lowerCAmelCase :
__SCREAMING_SNAKE_CASE : List[Any] = 200
__SCREAMING_SNAKE_CASE : Any = {'Content-Length': '100'}
__SCREAMING_SNAKE_CASE : Dict = {}
def _a (self , **lowercase ):
return [bytes(lowercase , """utf-8""" )]
def mock_request ( *lowerCamelCase__ , **lowerCAmelCase__ ):
    '''simple docstring'''
    return MockResponse()
@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    import requests

    monkeypatch.setattr(requests, "request", mock_request)
    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2
@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2
def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
| 361 |
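# A minimal sketch of the DownloadManager API the tests above cover, outside of
# pytest (assumes the `datasets` package; the URL is a hypothetical placeholder):
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager

dl_manager = DownloadManager(
    dataset_name="dummy",
    download_config=DownloadConfig(cache_dir="./downloads", use_etag=False),
)

# download() accepts a single URL, a list, or a dict of URLs and mirrors that shape.
local_path = dl_manager.download("https://example.com/data.tar.gz")  # hypothetical URL
extracted_dir = dl_manager.extract(local_path)

# Stream (path, file-object) pairs out of an archive without fully unpacking it.
for inner_path, file_obj in dl_manager.iter_archive(local_path):
    print(inner_path)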
'''simple docstring'''
def reverse_words(input_str: str) -> str:
    """
    Return the words of `input_str` in reverse order.

    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 135 | 0 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_abit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , ):
    load_in_abit = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
' make sure you have the latest version of `bitsandbytes` installed.' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
            ' make sure you have the latest version of `bitsandbytes` installed.' )
lowercase = []
# custom device map
if isinstance(_snake_case , _snake_case ) and len(device_map.keys() ) > 1:
lowercase = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
lowercase = get_keys_to_not_convert(_snake_case )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(_snake_case )
lowercase = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
lowercase = []
lowercase = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(_snake_case )
# compatibility with peft
lowercase = load_in_abit
lowercase = load_in_abit
lowercase = get_parameter_device(_snake_case )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'It is not recommended to quantize a loaded model. '
'The model should be instantiated under the `init_empty_weights` context manager.' )
lowercase = replace_with_bnb_layers(_snake_case , _snake_case , modules_to_not_convert=_snake_case )
# convert param to the right dtype
lowercase = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
lowercase = name.replace('.weight' , '' ).replace('.bias' , '' )
lowercase = getattr(_snake_case , _snake_case , _snake_case )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(_snake_case ):
param.to(_snake_case )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info(
F'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
            ' We move the model to cuda.' )
return model
elif weights_location is None:
raise RuntimeError(
F'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
lowercase = replace_with_bnb_layers(
_snake_case , _snake_case , modules_to_not_convert=_snake_case )
lowercase = get_quantized_model_device_map(
_snake_case , _snake_case , _snake_case , max_memory=_snake_case , no_split_module_classes=_snake_case , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
lowercase = True
lowercase = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] )
load_checkpoint_in_model(
_snake_case , _snake_case , _snake_case , dtype=bnb_quantization_config.torch_dtype , offload_folder=_snake_case , offload_state_dict=_snake_case , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(_snake_case , device_map=_snake_case , offload_dir=_snake_case )
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None ):
if device_map is None:
if torch.cuda.is_available():
lowercase = {'''''': torch.cuda.current_device()}
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
        logger.info('The device_map was not initialized. ' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' )
if isinstance(_snake_case , _snake_case ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
'\'sequential\'.' )
lowercase = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
lowercase = {}
lowercase = special_dtypes
lowercase = no_split_module_classes
lowercase = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
lowercase = get_balanced_memory(
_snake_case , low_zero=(device_map == 'balanced_low_0') , max_memory=_snake_case , **_snake_case , )
lowercase = max_memory
lowercase = infer_auto_device_map(_snake_case , **_snake_case )
if isinstance(_snake_case , _snake_case ):
# check if don't have any quantized module on the cpu
lowercase = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
lowercase = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' )
else:
logger.info(
                    'Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' )
del device_map_without_some_modules
return device_map
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None ):
if modules_to_not_convert is None:
lowercase = []
lowercase = _replace_with_bnb_layers(
_snake_case , _snake_case , _snake_case , _snake_case )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
            ' This can happen for some architectures such as gpt2 that use Conv1D instead of Linear layers.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , ):
lowercase = False
for name, module in model.named_children():
if current_key_name is None:
lowercase = []
current_key_name.append(_snake_case )
if isinstance(_snake_case , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
lowercase = '''.'''.join(_snake_case )
lowercase = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
lowercase = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
lowercase = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_snake_case , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
lowercase = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
                raise ValueError('load_in_8bit and load_in_4bit can\'t both be False' )
lowercase = module.weight.data
if module.bias is not None:
lowercase = module.bias.data
bnb_module.requires_grad_(_snake_case )
setattr(_snake_case , _snake_case , _snake_case )
lowercase = True
if len(list(module.children() ) ) > 0:
lowercase = _replace_with_bnb_layers(
_snake_case , _snake_case , _snake_case , _snake_case )
lowercase = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
# Create a copy of the model
with init_empty_weights():
lowercase = deepcopy(_snake_case ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
lowercase = find_tied_parameters(_snake_case )
# For compatibility with Accelerate < 0.18
if isinstance(_snake_case , _snake_case ):
lowercase = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
lowercase = sum(_snake_case , [] )
lowercase = len(_snake_case ) > 0
# Check if it is a base model
lowercase = False
if hasattr(_snake_case , 'base_model_prefix' ):
lowercase = not hasattr(_snake_case , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowercase = list(model.named_children() )
lowercase = [list_modules[-1][0]]
# add last module together with tied weights
lowercase = set(_snake_case ) - set(_snake_case )
lowercase = list(set(_snake_case ) ) + list(_snake_case )
# remove ".weight" from the keys
lowercase = ['''.weight''', '''.bias''']
lowercase = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowercase = name.replace(_snake_case , '' )
filtered_module_names.append(_snake_case )
return filtered_module_names
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
for m in model.modules():
if isinstance(_snake_case , bnb.nn.Linearabit ):
return True
return False
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
return next(parameter.parameters() ).device
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(_snake_case , _snake_case , 0 , dtype=_snake_case , value=_snake_case )
lowercase = param_name
lowercase = model
if "." in tensor_name:
lowercase = tensor_name.split('.' )
for split in splits[:-1]:
lowercase = getattr(_snake_case , _snake_case )
if new_module is None:
raise ValueError(F'''{module} has no attribute {split}.''' )
lowercase = new_module
lowercase = splits[-1]
# offload weights
lowercase = False
offload_weight(module._parameters[tensor_name] , _snake_case , _snake_case , index=_snake_case )
if hasattr(module._parameters[tensor_name] , 'SCB' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , _snake_case , index=_snake_case , )
else:
offload_weight(_snake_case , _snake_case , _snake_case , index=_snake_case )
offload_weight(_snake_case , param_name.replace('weight' , 'SCB' ) , _snake_case , index=_snake_case )
set_module_tensor_to_device(_snake_case , _snake_case , 'meta' , dtype=_snake_case , value=torch.empty(*param.size() ) )
| 195 |
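# A minimal sketch of the public entry point that the module above implements.
# In released accelerate the top-level function (obfuscated here as `UpperCAmelCase_`)
# is `load_and_quantize_model`; the model name and weights folder below are
# hypothetical placeholders.
from accelerate import init_empty_weights
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("my-org/my-model")  # hypothetical checkpoint
with init_empty_weights():
    # Build the model skeleton without allocating weights, as the code above recommends.
    empty_model = AutoModelForCausalLM.from_config(config)

bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
quantized_model = load_and_quantize_model(
    empty_model,
    bnb_quantization_config=bnb_config,
    weights_location="path/to/checkpoint",  # hypothetical folder with the weight shards
    device_map="auto",
)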
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """
    Apply the logistic sigmoid 1 / (1 + e^(-x)) elementwise.

    >>> sigmoid(np.array([-1.0, 1.0, 2.0]))
    array([0.26894142, 0.73105858, 0.88079708])
    """
    return 1 / (1 + np.exp(-vector))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 60 | 0 |
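# The closed form above evaluates np.exp(-vector), which overflows (with a runtime
# warning) for large negative inputs. A common numerically stable variant -- a sketch,
# not part of the original file -- splits on the sign instead:
import numpy as np

def stable_sigmoid(vector: np.ndarray) -> np.ndarray:
    # For x >= 0 use 1 / (1 + e^-x); for x < 0 use the equivalent e^x / (1 + e^x).
    result = np.empty_like(vector, dtype=np.float64)
    positive = vector >= 0
    result[positive] = 1 / (1 + np.exp(-vector[positive]))
    exp_x = np.exp(vector[~positive])
    result[~positive] = exp_x / (1 + exp_x)
    return result

assert np.allclose(stable_sigmoid(np.array([-1.0, 1.0, 2.0])),
                   1 / (1 + np.exp(-np.array([-1.0, 1.0, 2.0]))))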
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig ( PretrainedConfig ):
    model_type = 'convbert'

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs, )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class ConvBertOnnxConfig ( OnnxConfig ):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ] )
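# A short, hedged usage sketch for the config / ONNX-config pair defined above:
config = ConvBertConfig()  # upstream default hyperparameters
onnx_config = ConvBertOnnxConfig(config)

# For the default task, every input shares the same dynamic batch/sequence axes:
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#              ('attention_mask', {0: 'batch', 1: 'sequence'}),
#              ('token_type_ids', {0: 'batch', 1: 'sequence'})])
print(onnx_config.inputs)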
| 364 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader ( yaml.SafeLoader ):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    """Split a README into its leading YAML block (if any) and the remaining text."""
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
class DatasetMetadata ( dict ):
    # class attributes
    _FIELDS_WITH_DASHES = {'train_eval_index'}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Load the metadata from the YAML block of a README file, if present."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        """Write (or update in place) the YAML metadata block of a README file."""
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        full_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(full_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Build DatasetMetadata from a YAML string, rejecting duplicate keys."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
UpperCamelCase__ = {
'image-classification': [],
'translation': [],
'image-segmentation': [],
'fill-mask': [],
'automatic-speech-recognition': [],
'token-classification': [],
'sentence-similarity': [],
'audio-classification': [],
'question-answering': [],
'summarization': [],
'zero-shot-classification': [],
'table-to-text': [],
'feature-extraction': [],
'other': [],
'multiple-choice': [],
'text-classification': [],
'text-to-image': [],
'text2text-generation': [],
'zero-shot-image-classification': [],
'tabular-classification': [],
'tabular-regression': [],
'image-to-image': [],
'tabular-to-text': [],
'unconditional-image-generation': [],
'text-retrieval': [],
'text-to-speech': [],
'object-detection': [],
'audio-to-audio': [],
'text-generation': [],
'conversational': [],
'table-question-answering': [],
'visual-question-answering': [],
'image-to-text': [],
'reinforcement-learning': [],
'voice-activity-detection': [],
'time-series-forecasting': [],
'document-question-answering': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
UpperCamelCase__ = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.')
ap.add_argument('readme_filepath')
UpperCamelCase__ = ap.parse_args()
UpperCamelCase__ = Path(args.readme_filepath)
UpperCamelCase__ = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 143 | 0 |
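# A minimal round-trip sketch for the DatasetMetadata helper above (the README
# path is a hypothetical placeholder):
from pathlib import Path

readme = Path("README.md")  # hypothetical dataset card
metadata = DatasetMetadata.from_readme(readme)  # parses the leading --- YAML --- block
metadata["license"] = ["mit"]                   # DatasetMetadata is a plain dict subclass
metadata.to_readme(readme)                      # rewrites the YAML block in place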