from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
    "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
    "junnyu/roformer_chinese_char_small": (
        "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
    ),
    "junnyu/roformer_chinese_char_base": (
        "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
    ),
    "junnyu/roformer_small_discriminator": (
        "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
    ),
    "junnyu/roformer_small_generator": (
        "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}


class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50_000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
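
# Hedged usage sketch (not part of the original file): instantiate the config
# and inspect the dynamic ONNX axes. Assumes the classes above are importable
# from an installed `transformers` and that `OnnxConfig` accepts a `task` kwarg.
if __name__ == "__main__":
    config = RoFormerConfig(vocab_size=50_000, hidden_size=768)
    onnx_config = RoFormerOnnxConfig(config, task="sequence-classification")
    print(onnx_config.inputs)  # OrderedDict mapping input names to dynamic axes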
# =============================================================================
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def _SCREAMING_SNAKE_CASE ( _lowercase : int ) ->str:
'''simple docstring'''
if not isinstance(_lowercase , _lowercase ):
raise TypeError("Undefined for non-integers" )
elif precision < 1:
raise ValueError("Undefined for non-natural numbers" )
a : Tuple = precision
a : str = ceil(precision / 14 )
a : List[Any] = 42_6880 * Decimal(1_0005 ).sqrt()
a : Union[str, Any] = 1
a : Dict = 1359_1409
a : Optional[int] = Decimal(_lowercase )
for k in range(1 , _lowercase ):
a : int = factorial(6 * k ) // (factorial(3 * k ) * factorial(_lowercase ) ** 3)
linear_term += 5_4514_0134
exponential_term *= -26_2537_4126_4076_8000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
a : Optional[Any] = 50
print(F'''The first {n} digits of pi is: {pi(n)}''')
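
# Illustrative sanity check (not in the original script): the first digits
# should agree with the float value of pi from the standard library.
# >>> import math
# >>> pi(10).startswith(str(math.pi)[:10])
# True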
# =============================================================================
from typing import Optional

import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import (
    BackboneOutput,
    BaseModelOutputWithNoAttention,
    BaseModelOutputWithPoolingAndNoAttention,
    ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    """A convolution followed by batch normalization and an optional activation."""

    def __init__(
        self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings(nn.Module):
    """ResNet embeddings (stem), composed of a single aggressive convolution."""

    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    """Projects the residual features to the correct size, downsampling with `stride=2` if needed."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    """A classic ResNet residual layer, composed of two `3x3` convolutions."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state: Tensor) -> Tensor:
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetBottleNeckLayer(nn.Module):
    """
    A classic ResNet bottleneck layer: the first `1x1` convolution reduces the channels by a factor of `reduction`,
    and the last `1x1` convolution remaps them to `out_channels`.
    """

    def __init__(
        self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4
    ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state: Tensor) -> Tensor:
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetStage(nn.Module):
    """A ResNet stage composed of stacked layers."""

    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

RESNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
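
# Hedged usage sketch (not part of the original file): run a randomly
# initialized ResNet on a dummy image. Assumes `torch` and the ResNet port
# above are importable; the default config is assumed to be ResNet-50-shaped.
if __name__ == "__main__":
    config = ResNetConfig()
    model = ResNetModel(config)
    pixel_values = torch.randn(1, 3, 224, 224)
    outputs = model(pixel_values)
    print(outputs.last_hidden_state.shape)  # expected: torch.Size([1, 2048, 7, 7])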
# =============================================================================
import os


def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product

    return largest


def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]

    return largest_product(grid)


if __name__ == "__main__":
    print(solution())
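
# A small, hand-checkable example (illustrative only; the Project Euler input
# is the 20x20 grid in grid.txt):
# >>> largest_product([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
# 43680
# i.e. the bottom row, 13 * 14 * 15 * 16.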
# =============================================================================
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
__UpperCamelCase : Union[str, Any] = {'''configuration_beit''': ['''BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BeitConfig''', '''BeitOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : int = ['''BeitFeatureExtractor''']
__UpperCamelCase : Optional[int] = ['''BeitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : int = [
'''BEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BeitForImageClassification''',
'''BeitForMaskedImageModeling''',
'''BeitForSemanticSegmentation''',
'''BeitModel''',
'''BeitPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = [
'''FlaxBeitForImageClassification''',
'''FlaxBeitForMaskedImageModeling''',
'''FlaxBeitModel''',
'''FlaxBeitPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
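
# Note (illustrative): with `_LazyModule` installed in `sys.modules`, a statement
# like `from transformers.models.beit import BeitConfig` triggers the import of
# `configuration_beit` only when the attribute is first accessed, which keeps
# `import transformers` cheap.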
# =============================================================================
"""simple docstring"""
import math
class a :
def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : list[list[float]] , __lowerCAmelCase : list[int] ):
_UpperCAmelCase = 0.0
_UpperCAmelCase = 0.0
for i in range(len(__lowerCAmelCase ) ):
da += math.pow((sample[i] - weights[0][i]) , 2 )
da += math.pow((sample[i] - weights[1][i]) , 2 )
return 0 if da > da else 1
return 0
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : list[list[int | float]] , __lowerCAmelCase : list[int] , __lowerCAmelCase : int , __lowerCAmelCase : float ):
for i in range(len(__lowerCAmelCase ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def __UpperCAmelCase ( ):
"""simple docstring"""
# Training Examples ( m, n )
_UpperCAmelCase = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
_UpperCAmelCase = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
_UpperCAmelCase = SelfOrganizingMap()
_UpperCAmelCase = 3
_UpperCAmelCase = 0.5
for _ in range(lowercase ):
for j in range(len(lowercase ) ):
# training sample
_UpperCAmelCase = training_samples[j]
# Compute the winning vector
_UpperCAmelCase = self_organizing_map.get_winner(lowercase ,lowercase )
# Update the winning vector
_UpperCAmelCase = self_organizing_map.update(lowercase ,lowercase ,lowercase ,lowercase )
# classify test sample
_UpperCAmelCase = [0, 0, 0, 1]
_UpperCAmelCase = self_organizing_map.get_winner(lowercase ,lowercase )
# results
print(f'''Clusters that the test sample belongs to : {winner}''' )
print(f'''Weights that have been trained : {weights}''' )
# running the main() function
if __name__ == "__main__":
main()
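
# Illustrative check of the competitive step (not in the original file):
# >>> SelfOrganizingMap().get_winner([[1.0, 0.0], [0.0, 1.0]], [1, 0])
# 0
# The first weight vector coincides with the sample, so it wins.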
# =============================================================================
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
a :Any = logging.get_logger(__name__)
a :List[str] = {
"microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class __a (UpperCamelCase_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :str = """layoutlmv3"""
def __init__( self , _a=50_265 , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=1E-5 , _a=1 , _a=0 , _a=2 , _a=1_024 , _a=128 , _a=128 , _a=True , _a=32 , _a=128 , _a=64 , _a=256 , _a=True , _a=True , _a=True , _a=224 , _a=3 , _a=16 , _a=None , **_a , ) -> Dict:
"""simple docstring"""
super().__init__(
vocab_size=_a , hidden_size=_a , num_hidden_layers=_a , num_attention_heads=_a , intermediate_size=_a , hidden_act=_a , hidden_dropout_prob=_a , attention_probs_dropout_prob=_a , max_position_embeddings=_a , type_vocab_size=_a , initializer_range=_a , layer_norm_eps=_a , pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a , )
SCREAMING_SNAKE_CASE__ : List[str] = max_ad_position_embeddings
SCREAMING_SNAKE_CASE__ : List[str] = coordinate_size
SCREAMING_SNAKE_CASE__ : List[str] = shape_size
SCREAMING_SNAKE_CASE__ : Tuple = has_relative_attention_bias
SCREAMING_SNAKE_CASE__ : Union[str, Any] = rel_pos_bins
SCREAMING_SNAKE_CASE__ : Tuple = max_rel_pos
SCREAMING_SNAKE_CASE__ : Any = has_spatial_attention_bias
SCREAMING_SNAKE_CASE__ : List[str] = rel_ad_pos_bins
SCREAMING_SNAKE_CASE__ : Optional[int] = max_rel_ad_pos
SCREAMING_SNAKE_CASE__ : Tuple = text_embed
SCREAMING_SNAKE_CASE__ : Dict = visual_embed
SCREAMING_SNAKE_CASE__ : Union[str, Any] = input_size
SCREAMING_SNAKE_CASE__ : Dict = num_channels
SCREAMING_SNAKE_CASE__ : Any = patch_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The input order differs between question answering / sequence
        # classification and the other tasks.
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # A processor with OCR enabled would overwrite the provided words and boxes
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)

        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
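
# Minimal instantiation sketch (illustrative; assumes the classes above ship
# with an installed `transformers`):
if __name__ == "__main__":
    config = LayoutLMv3Config()
    print(config.model_type, config.max_2d_position_embeddings)  # layoutlmv3 1024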
# =============================================================================
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
a :Optional[Any] = logging.get_logger(__name__)
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None ) -> List[str]:
# Recurse if needed
if "." in tensor_name:
SCREAMING_SNAKE_CASE__ : List[Any] = tensor_name.split(""".""" )
for split in splits[:-1]:
SCREAMING_SNAKE_CASE__ : Dict = getattr(__lowerCAmelCase , __lowerCAmelCase )
if new_module is None:
raise ValueError(F'''{module} has no attribute {split}.''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = new_module
SCREAMING_SNAKE_CASE__ : Any = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F'''{module} does not have a parameter or a buffer named {tensor_name}.''' )
SCREAMING_SNAKE_CASE__ : List[str] = tensor_name in module._buffers
SCREAMING_SNAKE_CASE__ : Dict = getattr(__lowerCAmelCase , __lowerCAmelCase )
if old_value.device == torch.device("""meta""" ) and device not in ["meta", torch.device("""meta""" )] and value is None:
raise ValueError(F'''{tensor_name} is on the meta device, we need a `value` to put in on {device}.''' )
SCREAMING_SNAKE_CASE__ : List[str] = False
SCREAMING_SNAKE_CASE__ : str = False
if is_buffer or not is_bitsandbytes_available():
SCREAMING_SNAKE_CASE__ : Optional[int] = False
SCREAMING_SNAKE_CASE__ : List[Any] = False
else:
SCREAMING_SNAKE_CASE__ : str = hasattr(bnb.nn , """Params4bit""" ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
SCREAMING_SNAKE_CASE__ : str = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
if is_abit or is_abit:
SCREAMING_SNAKE_CASE__ : Dict = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
SCREAMING_SNAKE_CASE__ : Tuple = old_value.to(__lowerCAmelCase )
elif isinstance(__lowerCAmelCase , torch.Tensor ):
SCREAMING_SNAKE_CASE__ : int = value.to("""cpu""" )
if value.dtype == torch.inta:
SCREAMING_SNAKE_CASE__ : str = version.parse(importlib.metadata.version("""bitsandbytes""" ) ) > version.parse(
"""0.37.2""" )
if not is_abit_serializable:
raise ValueError(
"""Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. """
"""Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.""" )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor(__lowerCAmelCase , device="""cpu""" )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , __lowerCAmelCase ) and fpaa_statistics is None:
SCREAMING_SNAKE_CASE__ : Optional[int] = new_value.T
SCREAMING_SNAKE_CASE__ : Union[str, Any] = old_value.__dict__
if is_abit:
SCREAMING_SNAKE_CASE__ : str = bnb.nn.IntaParams(__lowerCAmelCase , requires_grad=__lowerCAmelCase , **__lowerCAmelCase ).to(__lowerCAmelCase )
elif is_abit:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = bnb.nn.Paramsabit(__lowerCAmelCase , requires_grad=__lowerCAmelCase , **__lowerCAmelCase ).to(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = new_value
if fpaa_statistics is not None:
setattr(module.weight , """SCB""" , fpaa_statistics.to(__lowerCAmelCase ) )
else:
if value is None:
SCREAMING_SNAKE_CASE__ : str = old_value.to(__lowerCAmelCase )
elif isinstance(__lowerCAmelCase , torch.Tensor ):
SCREAMING_SNAKE_CASE__ : List[str] = value.to(__lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor(__lowerCAmelCase , device=__lowerCAmelCase )
if is_buffer:
SCREAMING_SNAKE_CASE__ : List[str] = new_value
else:
SCREAMING_SNAKE_CASE__ : List[Any] = nn.Parameter(__lowerCAmelCase , requires_grad=old_value.requires_grad )
SCREAMING_SNAKE_CASE__ : Dict = new_value
def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model


def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    # Create a copy of the model and tie the weights, then check if it contains tied weights
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
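
# Hedged usage sketch (illustrative, not from the original file; requires a
# CUDA machine with `bitsandbytes` installed):
#
#   from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   quant_config = BitsAndBytesConfig(load_in_8bit=True)
#   model = replace_with_bnb_linear(model, quantization_config=quant_config)
#   # every eligible nn.Linear / Conv1D is now a bnb.nn.Linear8bitLt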
# =============================================================================
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
"""SCUT-DLVCLab/lilt-roberta-en-base""": (
"""https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"""
),
}
class _lowerCamelCase ( a_ ):
_lowerCamelCase :Optional[int] = "lilt"
def __init__( self : Union[str, Any] , UpperCamelCase : List[str]=3_05_22 , UpperCamelCase : Any=7_68 , UpperCamelCase : Union[str, Any]=12 , UpperCamelCase : str=12 , UpperCamelCase : List[str]=30_72 , UpperCamelCase : Optional[int]="gelu" , UpperCamelCase : Union[str, Any]=0.1 , UpperCamelCase : List[Any]=0.1 , UpperCamelCase : Dict=5_12 , UpperCamelCase : Optional[Any]=2 , UpperCamelCase : Optional[Any]=0.02 , UpperCamelCase : List[Any]=1E-1_2 , UpperCamelCase : Dict=0 , UpperCamelCase : str="absolute" , UpperCamelCase : Optional[Any]=None , UpperCamelCase : str=4 , UpperCamelCase : Optional[int]=10_24 , **UpperCamelCase : Union[str, Any] , ) -> str:
"""simple docstring"""
super().__init__(pad_token_id=UpperCamelCase , **UpperCamelCase )
lowerCAmelCase__ : str = vocab_size
lowerCAmelCase__ : Optional[int] = hidden_size
lowerCAmelCase__ : Optional[Any] = num_hidden_layers
lowerCAmelCase__ : Any = num_attention_heads
lowerCAmelCase__ : Union[str, Any] = hidden_act
lowerCAmelCase__ : Any = intermediate_size
lowerCAmelCase__ : str = hidden_dropout_prob
lowerCAmelCase__ : Dict = attention_probs_dropout_prob
lowerCAmelCase__ : Optional[Any] = max_position_embeddings
lowerCAmelCase__ : List[Any] = type_vocab_size
lowerCAmelCase__ : Dict = initializer_range
lowerCAmelCase__ : Optional[Any] = layer_norm_eps
lowerCAmelCase__ : Dict = position_embedding_type
lowerCAmelCase__ : Optional[int] = classifier_dropout
lowerCAmelCase__ : List[str] = channel_shrink_ratio
lowerCAmelCase__ : Union[str, Any] = max_ad_position_embeddings
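
# Quick check (illustrative):
# >>> LiltConfig(channel_shrink_ratio=4).model_type
# 'lilt'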
# =============================================================================
"""simple docstring"""
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class _lowerCamelCase ( a_ ):
def _lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def _lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(UpperCamelCase )
def _lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = self._create_example_records()
lowerCAmelCase__ : Tuple = Dataset.from_list(UpperCamelCase )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(UpperCamelCase ):
self.assertDictEqual(UpperCamelCase , example_records[i] )
def _lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : Any = self._create_example_records()
lowerCAmelCase__ : Optional[Any] = Dataset.from_list(UpperCamelCase )
lowerCAmelCase__ : int = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def _lowerCAmelCase ( self : Tuple ) -> List[Any]: # checks what happens with missing columns
"""simple docstring"""
lowerCAmelCase__ : str = [{"""col_1""": 1}, {"""col_2""": """x"""}]
lowerCAmelCase__ : int = Dataset.from_list(UpperCamelCase )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def _lowerCAmelCase ( self : str ) -> Dict: # checks if the type can be inferred from the second record
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
lowerCAmelCase__ : Optional[int] = Dataset.from_list(UpperCamelCase )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def _lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = Dataset.from_list([] )
self.assertEqual(len(UpperCamelCase ) , 0 )
self.assertListEqual(dset.column_names , [] )
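
# Quick illustration of the API under test (assumes `datasets` is installed):
if __name__ == "__main__":
    ds = Dataset.from_list([{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}])
    print(ds.column_names)  # ['col_1', 'col_2']
    print(ds[0])            # {'col_1': 3, 'col_2': 'a'}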
# =============================================================================
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path

import timm
import torch
from huggingface_hub import hf_hub_download

from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger()


def convert_weight_and_push(
    hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True
):
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        # the two state dicts line up one-to-one, so copy by position
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }

    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4],
            key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4],
            key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4],
            key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4],
            key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4],
            key_dim=[32, 32, 32], drop_path_rate=0.1,
        ),
    }

    if model_name:
        # bind `config` here as well so the return below is always defined
        config = names_to_config[model_name]
        convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="levit-dump-folder/",
        type=Path,
        required=False,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
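
# Example invocation (illustrative; the script filename is assumed):
#   python convert_levit_timm_to_pytorch.py --model_name levit-128S \
#       --pytorch_dump_folder_path levit-dump-folder/ --no-push_to_hub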
# =============================================================================
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# =============================================================================
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            torchscript=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            fp16=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            fp16=True,
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
def SCREAMING_SNAKE_CASE__ ( self ):
MODEL_ID = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=True , inference=True , save_to_csv=True , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(tmp_dir , 'inf_time.csv' ) , train_memory_csv_file=os.path.join(tmp_dir , 'train_mem.csv' ) , inference_memory_csv_file=os.path.join(tmp_dir , 'inf_mem.csv' ) , train_time_csv_file=os.path.join(tmp_dir , 'train_time.csv' ) , env_info_csv_file=os.path.join(tmp_dir , 'env.csv' ) , multi_process=False , )
benchmark = PyTorchBenchmark(benchmark_args )
benchmark.run()
self.assertTrue(Path(os.path.join(tmp_dir , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(tmp_dir , 'train_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(tmp_dir , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(tmp_dir , 'train_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(tmp_dir , 'env.csv' ) ).exists() )
def SCREAMING_SNAKE_CASE__ ( self ):
MODEL_ID = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(summary ):
self.assertTrue(hasattr(summary , 'sequential' ) )
self.assertTrue(hasattr(summary , 'cumulative' ) )
self.assertTrue(hasattr(summary , 'current' ) )
self.assertTrue(hasattr(summary , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
benchmark_args = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=True , inference=True , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(tmp_dir , 'log.txt' ) , log_print=True , trace_memory_line_by_line=True , multi_process=False , )
benchmark = PyTorchBenchmark(benchmark_args )
result = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(tmp_dir , 'log.txt' ) ).exists() )
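# Editorial sketch (not part of the original test file; the model id and sizes
# are illustrative): the benchmark API exercised above can also run standalone.
#
#   args = PyTorchBenchmarkArguments(
#       models=["sshleifer/tiny-gpt2"], inference=True, training=False,
#       sequence_lengths=[8], batch_sizes=[1], multi_process=False,
#   )
#   results = PyTorchBenchmark(args).run()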
| 195 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
"processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_speech_to_text"] = [
"TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSpeech2TextForConditionalGeneration",
"TFSpeech2TextModel",
"TFSpeech2TextPreTrainedModel",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_speech_to_text"] = [
"SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Speech2TextForConditionalGeneration",
"Speech2TextModel",
"Speech2TextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeech2TextForConditionalGeneration,
TFSpeech2TextModel,
TFSpeech2TextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
Speech2TextForConditionalGeneration,
Speech2TextModel,
Speech2TextPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
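# Editorial note: with the _LazyModule installed in sys.modules above, heavy
# submodules are imported only on first attribute access, e.g. (illustrative):
#
#   from transformers.models.speech_to_text import Speech2TextConfig  # triggers the real import lazily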
| 231 | 0 |
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def __lowerCAmelCase ( lowercase : Path , lowercase : list ) -> List[Any]:
"""simple docstring"""
snake_case : Tuple = "\n".join(__lowerCAmelCase )
Path(__lowerCAmelCase ).open("w" ).writelines(__lowerCAmelCase )
T5_TINY = '''patrickvonplaten/t5-tiny-random'''
BART_TINY = '''sshleifer/bart-tiny-random'''
MBART_TINY = '''sshleifer/tiny-mbart'''
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class _lowerCAmelCase ( TestCasePlus ):
def lowerCamelCase ( self , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Optional[Any] = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source"
snake_case : Tuple = input_file_name.parent / "utest_output.txt"
assert not output_file_name.exists()
snake_case : int = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
_dump_articles(__snake_case , __snake_case )
snake_case : List[Any] = str(Path(self.get_auto_remove_tmp_dir() ) / "scores.json" )
snake_case : int = "translation_en_to_de" if model == T5_TINY else "summarization"
snake_case : Any = F'\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '.split()
with patch.object(__snake_case , "argv" , __snake_case ):
run_generate()
assert Path(__snake_case ).exists()
# os.remove(Path(output_file_name))
def lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
self.run_eval_tester(__snake_case )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def lowerCamelCase ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
self.run_eval_tester(__snake_case )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def lowerCamelCase ( self , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
snake_case : str = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source"
snake_case : Tuple = input_file_name.parent / "utest_output.txt"
assert not output_file_name.exists()
snake_case : Union[str, Any] = {
"en": ["Machine learning is great, isn\'t it?", "I like to eat bananas", "Tomorrow is another great day!"],
"de": [
"Maschinelles Lernen ist großartig, oder?",
"Ich esse gerne Bananen",
"Morgen ist wieder ein toller Tag!",
],
}
snake_case : Union[str, Any] = Path(self.get_auto_remove_tmp_dir() )
snake_case : Dict = str(tmp_dir / "scores.json" )
snake_case : Any = str(tmp_dir / "val.target" )
_dump_articles(__snake_case , text["en"] )
_dump_articles(__snake_case , text["de"] )
snake_case : Optional[Any] = "translation_en_to_de" if model == T5_TINY else "summarization"
snake_case : Dict = F'\n run_eval_search.py\n {model}\n {str(__snake_case )}\n {str(__snake_case )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '.split()
testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"] )
with patch.object(__snake_case , "argv" , __snake_case ):
with CaptureStdout() as cs:
run_search()
snake_case : int = [" num_beams | length_penalty", model, "Best score args"]
snake_case : List[Any] = ["Info"]
if "translation" in task:
expected_strings.append("bleu" )
else:
expected_strings.extend(__snake_case )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(__snake_case ).exists()
os.remove(Path(__snake_case ) )
| 371 |
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 112 | 0 |
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = '''src/transformers'''
PATH_TO_DOCS = '''docs/source/en'''
REPO_PATH = '''.'''
def UpperCamelCase ( _lowerCAmelCase : Any, _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Optional[int] ) -> Union[str, Any]:
with open(_lowerCAmelCase, """r""", encoding="""utf-8""", newline="""\n""" ) as f:
_UpperCAmelCase : str = f.readlines()
# Find the start prompt.
_UpperCAmelCase : Dict = 0
while not lines[start_index].startswith(_lowerCAmelCase ):
start_index += 1
start_index += 1
_UpperCAmelCase : List[Any] = start_index
while not lines[end_index].startswith(_lowerCAmelCase ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
lowerCamelCase__ : Dict = '''Model|Encoder|Decoder|ForConditionalGeneration'''
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r'''TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
_re_flax_models = re.compile(r'''Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r'''(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def UpperCamelCase ( _lowerCAmelCase : Union[str, Any] ) -> Any:
_UpperCAmelCase : Optional[int] = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""", _lowerCAmelCase )
return [m.group(0 ) for m in matches]
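# Example (editorial): camel_case_split("CamelCaseXYZTest") -> ["Camel", "Case", "XYZ", "Test"]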
def UpperCamelCase ( _lowerCAmelCase : Any, _lowerCAmelCase : int ) -> Any:
_UpperCAmelCase : Union[str, Any] = 2 if text == """✅""" or text == """❌""" else len(_lowerCAmelCase )
_UpperCAmelCase : str = (width - text_length) // 2
_UpperCAmelCase : List[Any] = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def UpperCamelCase ( ) -> List[Any]:
_UpperCAmelCase : int = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
_UpperCAmelCase : List[Any] = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
_UpperCAmelCase : int = {name: config.replace("""Config""", """""" ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
_UpperCAmelCase : Dict = collections.defaultdict(_lowerCAmelCase )
_UpperCAmelCase : List[str] = collections.defaultdict(_lowerCAmelCase )
_UpperCAmelCase : List[Any] = collections.defaultdict(_lowerCAmelCase )
_UpperCAmelCase : List[str] = collections.defaultdict(_lowerCAmelCase )
_UpperCAmelCase : str = collections.defaultdict(_lowerCAmelCase )
# Let's lookup through all transformers object (once).
for attr_name in dir(_lowerCAmelCase ):
_UpperCAmelCase : List[str] = None
if attr_name.endswith("""Tokenizer""" ):
_UpperCAmelCase : Optional[int] = slow_tokenizers
_UpperCAmelCase : Optional[int] = attr_name[:-9]
elif attr_name.endswith("""TokenizerFast""" ):
_UpperCAmelCase : List[Any] = fast_tokenizers
_UpperCAmelCase : str = attr_name[:-13]
elif _re_tf_models.match(_lowerCAmelCase ) is not None:
_UpperCAmelCase : Tuple = tf_models
_UpperCAmelCase : Any = _re_tf_models.match(_lowerCAmelCase ).groups()[0]
elif _re_flax_models.match(_lowerCAmelCase ) is not None:
_UpperCAmelCase : Any = flax_models
_UpperCAmelCase : List[Any] = _re_flax_models.match(_lowerCAmelCase ).groups()[0]
elif _re_pt_models.match(_lowerCAmelCase ) is not None:
_UpperCAmelCase : Union[str, Any] = pt_models
_UpperCAmelCase : List[Any] = _re_pt_models.match(_lowerCAmelCase ).groups()[0]
if lookup_dict is not None:
while len(_lowerCAmelCase ) > 0:
if attr_name in model_name_to_prefix.values():
_UpperCAmelCase : List[str] = True
break
# Try again after removing the last word in the name
_UpperCAmelCase : Optional[Any] = """""".join(camel_case_split(_lowerCAmelCase )[:-1] )
# Let's build that table!
_UpperCAmelCase : List[Any] = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
_UpperCAmelCase : List[Any] = ["""Model""", """Tokenizer slow""", """Tokenizer fast""", """PyTorch support""", """TensorFlow support""", """Flax Support"""]
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
_UpperCAmelCase : List[Any] = [len(_lowerCAmelCase ) + 2 for c in columns]
_UpperCAmelCase : Optional[int] = max([len(_lowerCAmelCase ) for name in model_names] ) + 2
# Build the table per se
_UpperCAmelCase : Tuple = """|""" + """|""".join([_center_text(_lowerCAmelCase, _lowerCAmelCase ) for c, w in zip(_lowerCAmelCase, _lowerCAmelCase )] ) + """|\n"""
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([""":""" + """-""" * (w - 2) + """:""" for w in widths] ) + "|\n"
_UpperCAmelCase : Dict = {True: """✅""", False: """❌"""}
for name in model_names:
_UpperCAmelCase : Optional[int] = model_name_to_prefix[name]
_UpperCAmelCase : Tuple = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(_lowerCAmelCase, _lowerCAmelCase ) for l, w in zip(_lowerCAmelCase, _lowerCAmelCase )] ) + "|\n"
return table
def UpperCamelCase ( _lowerCAmelCase : Any=False ) -> Dict:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = _find_text_in_file(
filename=os.path.join(_lowerCAmelCase, """index.md""" ), start_prompt="""<!--This table is updated automatically from the auto modules""", end_prompt="""<!-- End table-->""", )
_UpperCAmelCase : List[str] = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(_lowerCAmelCase, """index.md""" ), """w""", encoding="""utf-8""", newline="""\n""" ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
"""The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.""" )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
args = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 246 |
"""simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def UpperCamelCase ( _lowerCAmelCase : Any, _lowerCAmelCase : List[str], _lowerCAmelCase : Dict ) -> str:
_UpperCAmelCase : Union[str, Any] = OmegaConf.load(_lowerCAmelCase )
_UpperCAmelCase : str = torch.load(_lowerCAmelCase, map_location="""cpu""" )["""model"""]
_UpperCAmelCase : Dict = list(state_dict.keys() )
# extract state_dict for VQVAE
_UpperCAmelCase : List[str] = {}
_UpperCAmelCase : List[str] = """first_stage_model."""
for key in keys:
if key.startswith(_lowerCAmelCase ):
_UpperCAmelCase : Dict = state_dict[key]
# extract state_dict for UNetLDM
_UpperCAmelCase : str = {}
_UpperCAmelCase : Tuple = """model.diffusion_model."""
for key in keys:
if key.startswith(_lowerCAmelCase ):
_UpperCAmelCase : Tuple = state_dict[key]
_UpperCAmelCase : Optional[Any] = config.model.params.first_stage_config.params
_UpperCAmelCase : Optional[Any] = config.model.params.unet_config.params
_UpperCAmelCase : List[str] = VQModel(**_lowerCAmelCase ).eval()
vqvae.load_state_dict(_lowerCAmelCase )
_UpperCAmelCase : List[Any] = UNetLDMModel(**_lowerCAmelCase ).eval()
unet.load_state_dict(_lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = DDIMScheduler(
timesteps=config.model.params.timesteps, beta_schedule="""scaled_linear""", beta_start=config.model.params.linear_start, beta_end=config.model.params.linear_end, clip_sample=_lowerCAmelCase, )
_UpperCAmelCase : Tuple = LDMPipeline(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase )
pipeline.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
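# Illustrative invocation (editorial; the script name and paths are placeholders):
#   python convert_ldm_original.py --checkpoint_path model.ckpt --config_path config.yaml --output_path ./ldm_pipeline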
| 246 | 1 |
"""simple docstring"""
import cv2
import numpy as np
class HarrisCorner :
"""simple docstring"""
def __init__( self ,k : float ,window_size : int ):
if k in (0.04, 0.06):
self.k = k
self.window_size = window_size
else:
raise ValueError('''invalid k value''' )
def __str__( self : Tuple ):
return str(self.k )
def detect( self ,img_path : str ):
img = cv2.imread(img_path ,0 )
h ,w = img.shape
corner_list : list[list[int]] = []
color_img = img.copy()
color_img = cv2.cvtColor(color_img ,cv2.COLOR_GRAY2RGB )
dy ,dx = np.gradient(img )
ixx = dx**2
iyy = dy**2
ixy = dx * dy
k = self.k  # use the k validated in __init__
offset = self.window_size // 2
for y in range(offset ,h - offset ):
for x in range(offset ,w - offset ):
wxx = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
wyy = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
wxy = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
det = (wxx * wyy) - (wxy**2)
trace = wxx + wyy
r = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) ,0 )
color_img.itemset((y, x, 1) ,0 )
color_img.itemset((y, x, 2) ,2_5_5 )
return color_img, corner_list
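# Editorial note: wxx, wyy and wxy above accumulate the windowed structure
# tensor M = [[Ixx, Ixy], [Ixy, Iyy]]; the Harris response
# r = det(M) - k * trace(M)**2 is large and positive only near corners.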
if __name__ == "__main__":
edge_detect = HarrisCorner(0.0_4, 3)
color_img , corner_list = edge_detect.detect('''path_to_image''')
cv2.imwrite('''detect.png''', color_img)
| 74 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = '''Create a default config file for Accelerate with only a few flags set.'''
def __SCREAMING_SNAKE_CASE ( A_="no" , A_ = default_json_config_file , A_ = False ):
lowerCAmelCase__ : List[Any] = Path(A_ )
path.parent.mkdir(parents=A_ , exist_ok=A_ )
if path.exists():
print(
f'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.' )
return False
lowerCAmelCase__ : Optional[int] = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
f'`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}' )
lowerCAmelCase__ : Optional[Any] = {
'''compute_environment''': '''LOCAL_MACHINE''',
'''mixed_precision''': mixed_precision,
}
if torch.cuda.is_available():
lowerCAmelCase__ : Union[str, Any] = torch.cuda.device_count()
lowerCAmelCase__ : Tuple = num_gpus
lowerCAmelCase__ : List[str] = False
if num_gpus > 1:
lowerCAmelCase__ : Any = '''MULTI_GPU'''
else:
lowerCAmelCase__ : Union[str, Any] = '''NO'''
elif is_xpu_available() and use_xpu:
lowerCAmelCase__ : Optional[Any] = torch.xpu.device_count()
lowerCAmelCase__ : Tuple = num_xpus
lowerCAmelCase__ : List[str] = False
if num_xpus > 1:
lowerCAmelCase__ : Union[str, Any] = '''MULTI_XPU'''
else:
lowerCAmelCase__ : List[Any] = '''NO'''
elif is_npu_available():
lowerCAmelCase__ : Optional[int] = torch.npu.device_count()
lowerCAmelCase__ : List[Any] = num_npus
lowerCAmelCase__ : Optional[int] = False
if num_npus > 1:
lowerCAmelCase__ : Any = '''MULTI_NPU'''
else:
lowerCAmelCase__ : int = '''NO'''
else:
lowerCAmelCase__ : List[Any] = 0
lowerCAmelCase__ : Union[str, Any] = True
lowerCAmelCase__ : Tuple = 1
lowerCAmelCase__ : Optional[Any] = '''NO'''
lowerCAmelCase__ : Optional[Any] = ClusterConfig(**A_ )
config.to_json_file(A_ )
return path
def default_command_parser( parser , parents ):
parser = parser.add_parser('''default''' , parents=parents , help=description , formatter_class=SubcommandHelpFormatter )
parser.add_argument(
'''--config_file''' , default=default_json_config_file , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , dest='''save_location''' , )
parser.add_argument(
'''--mixed_precision''' , choices=['''no''', '''fp16''', '''bf16'''] , type=str , help='''Whether or not to use mixed precision training. '''
'''Choose between FP16 and BF16 (bfloat16) training. '''
'''BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.''' , default='''no''' , )
parser.set_defaults(func=default_config_command )
return parser
def default_config_command( args ):
config_file = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(f'accelerate configuration saved at {config_file}' )
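# Illustrative programmatic usage (editorial; the import path is an assumption):
#   from accelerate.commands.config.default import write_basic_config
#   write_basic_config(mixed_precision="fp16")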
| 74 | 1 |
from __future__ import annotations
def __lowerCAmelCase ( a__ , a__ ) -> list[int]:
__a = 0
__a = len(a__ ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
__a = i + 1
else:
__a = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{two_pointer([2, 7, 1_1, 1_5], 9) = }")
| 6 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/bart-base''': 10_24,
'''facebook/bart-large''': 10_24,
'''facebook/bart-large-mnli''': 10_24,
'''facebook/bart-large-cnn''': 10_24,
'''facebook/bart-large-xsum''': 10_24,
'''yjernite/bart_eli5''': 10_24,
}
@lru_cache()
def bytes_to_unicode( ):
bs = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
cs = bs[:]
n = 0
for b in range(2**8 ):
if b not in bs:
bs.append(b )
cs.append(2**8 + n )
n += 1
cs = [chr(n ) for n in cs]
return dict(zip(bs , cs ) )
def get_pairs( word ):
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
prev_char = char
return pairs
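# Example (editorial): bytes_to_unicode() maps every byte 0-255 to a printable
# character so BPE never operates on raw whitespace or control bytes, e.g.
#   bytes_to_unicode()[32] == "Ġ"   # the space marker seen in GPT-2/BART vocabs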
class _UpperCamelCase ( PreTrainedTokenizer ):
'''simple docstring'''
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ['''input_ids''', '''attention_mask''']
def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs ):
"""simple docstring"""
bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
# Mask token behaves like a normal word, i.e. include the space before it
mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
super().__init__(
errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
with open(vocab_file , encoding="""utf-8""" ) as vocab_handle:
self.encoder = json.load(vocab_handle )
self.decoder = {v: k for k, v in self.encoder.items()}
self.errors = errors # how to handle errors in decoding
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
with open(merges_file , encoding="""utf-8""" ) as merges_handle:
bpe_merges = merges_handle.read().split("""\n""" )[1:-1]
bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
self.cache = {}
self.add_prefix_space = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def vocab_size( self ):
"""simple docstring"""
return len(self.encoder )
def get_vocab( self ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def bpe( self , token ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
word = tuple(token )
pairs = get_pairs(word )
if not pairs:
return token
while True:
bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
first , second = bigram
new_word = []
i = 0
while i < len(word ):
try:
j = word.index(first , i )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
i = j
if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
new_word = tuple(new_word )
word = new_word
if len(word ) == 1:
break
else:
pairs = get_pairs(word )
word = """ """.join(word )
self.cache[token] = word
return word
def _tokenize( self , text ):
"""simple docstring"""
bpe_tokens = []
for token in re.findall(self.pat , text ):
token = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(""" """ ) )
return bpe_tokens
def _convert_token_to_id( self , token ):
"""simple docstring"""
return self.encoder.get(token , self.encoder.get(self.unk_token ) )
def _convert_id_to_token( self , index ):
"""simple docstring"""
return self.decoder.get(index )
def convert_tokens_to_string( self , tokens ):
"""simple docstring"""
text = """""".join(tokens )
text = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(save_directory ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
vocab_file = os.path.join(
save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
merge_file = os.path.join(
save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(vocab_file , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + """\n""" )
index = 0
with open(merge_file , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
""" Please check that the tokenizer is not corrupted!""" )
index = token_index
writer.write(""" """.join(bpe_tokens ) + """\n""" )
index += 1
return vocab_file, merge_file
def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
if token_ids_1 is None:
return [1] + ([0] * len(token_ids_0 )) + [1]
return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
"""simple docstring"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep ) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
def prepare_for_tokenization( self , text , is_split_into_words=False , **kwargs ):
"""simple docstring"""
add_prefix_space = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
text = """ """ + text
return (text, kwargs)
| 112 | 0 |
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> str:
stooge(SCREAMING_SNAKE_CASE_ , 0 , len(SCREAMING_SNAKE_CASE_ ) - 1 )
return arr
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
if i >= h:
return
# If first element is smaller than the last then swap them
if arr[i] > arr[h]:
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
lowerCAmelCase__ : Union[str, Any] = (int)((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (h - t) )
# Recursively sort last 2/3 elements
stooge(SCREAMING_SNAKE_CASE_ , i + t , (SCREAMING_SNAKE_CASE_) )
# Recursively sort first 2/3 elements
stooge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (h - t) )
if __name__ == "__main__":
user_input = input("""Enter numbers separated by a comma:\n""").strip()
unsorted = [int(item) for item in user_input.split(""",""")]
print(stooge_sort(unsorted))
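# Editorial note: stooge sort recurses three times on 2/3 of the range, giving
# O(n^(log 3 / log 1.5)) ≈ O(n^2.71) comparisons, e.g. (doctest-style):
#   >>> stooge_sort([4, 1, 3, 2])
#   [1, 2, 3, 4]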
| 307 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNet2DConditionModel,
UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( PipelineTesterMixin , unittest.TestCase ):
pipeline_class = UnCLIPImageVariationPipeline
params = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
batch_params = IMAGE_VARIATION_BATCH_PARAMS
required_optional_params = [
'generator',
'return_dict',
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
test_xformers_attention = False
@property
def text_embedder_hidden_size( self ):
'''simple docstring'''
return 32
@property
def time_input_dim( self ):
'''simple docstring'''
return 32
@property
def block_out_channels_a( self ):
'''simple docstring'''
return self.time_input_dim
@property
def time_embed_dim( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def cross_attention_dim( self ):
'''simple docstring'''
return 100
@property
def dummy_tokenizer( self ):
'''simple docstring'''
tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def dummy_text_encoder( self ):
'''simple docstring'''
torch.manual_seed(0 )
config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(config )
@property
def dummy_image_encoder( self ):
'''simple docstring'''
torch.manual_seed(0 )
config = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(config )
@property
def dummy_text_proj( self ):
'''simple docstring'''
torch.manual_seed(0 )
model_kwargs = {
'clip_embeddings_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'cross_attention_dim': self.cross_attention_dim,
}
model = UnCLIPTextProjModel(**model_kwargs )
return model
@property
def dummy_decoder( self ):
'''simple docstring'''
torch.manual_seed(0 )
model_kwargs = {
'sample_size': 32,
# RGB in channels
'in_channels': 3,
# Out channels is double in channels because predicts mean and variance
'out_channels': 6,
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': 'identity',
}
model = UNet2DConditionModel(**model_kwargs )
return model
@property
def dummy_super_res_kwargs( self ):
'''simple docstring'''
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def dummy_super_res_first( self ):
'''simple docstring'''
torch.manual_seed(0 )
model = UNet2DModel(**self.dummy_super_res_kwargs )
return model
@property
def dummy_super_res_last( self ):
'''simple docstring'''
torch.manual_seed(1 )
model = UNet2DModel(**self.dummy_super_res_kwargs )
return model
def get_dummy_components( self ):
'''simple docstring'''
decoder = self.dummy_decoder
text_proj = self.dummy_text_proj
text_encoder = self.dummy_text_encoder
tokenizer = self.dummy_tokenizer
super_res_first = self.dummy_super_res_first
super_res_last = self.dummy_super_res_last
decoder_scheduler = UnCLIPScheduler(
variance_type='learned_range' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
super_res_scheduler = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='epsilon' , num_train_timesteps=1_000 , )
feature_extractor = CLIPImageProcessor(crop_size=32 , size=32 )
image_encoder = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def get_dummy_inputs( self , device , seed=0 , pil_image=True ):
'''simple docstring'''
input_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
if str(device ).startswith('mps' ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
if pil_image:
input_image = input_image * 0.5 + 0.5
input_image = input_image.clamp(0 , 1 )
input_image = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
input_image = DiffusionPipeline.numpy_to_pil(input_image )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
device = 'cpu'
components = self.get_dummy_components()
pipe = self.pipeline_class(**components )
pipe = pipe.to(device )
pipe.set_progress_bar_config(disable=None )
pipeline_inputs = self.get_dummy_inputs(device , pil_image=False )
output = pipe(**pipeline_inputs )
image = output.images
tuple_pipeline_inputs = self.get_dummy_inputs(device , pil_image=False )
image_from_tuple = pipe(
**tuple_pipeline_inputs , return_dict=False , )[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array(
[
0.9_9_9_7,
0.0_0_0_2,
0.9_9_9_7,
0.9_9_9_7,
0.9_9_6_9,
0.0_0_2_3,
0.9_9_9_7,
0.9_9_6_9,
0.9_9_7_0,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
device = 'cpu'
components = self.get_dummy_components()
pipe = self.pipeline_class(**components )
pipe = pipe.to(device )
pipe.set_progress_bar_config(disable=None )
pipeline_inputs = self.get_dummy_inputs(device , pil_image=True )
output = pipe(**pipeline_inputs )
image = output.images
tuple_pipeline_inputs = self.get_dummy_inputs(device , pil_image=True )
image_from_tuple = pipe(
**tuple_pipeline_inputs , return_dict=False , )[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.9_9_9_7, 0.0_0_0_3, 0.9_9_9_7, 0.9_9_9_7, 0.9_9_7_0, 0.0_0_2_4, 0.9_9_9_7, 0.9_9_7_1, 0.9_9_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
device = 'cpu'
components = self.get_dummy_components()
pipe = self.pipeline_class(**components )
pipe = pipe.to(device )
pipe.set_progress_bar_config(disable=None )
pipeline_inputs = self.get_dummy_inputs(device , pil_image=True )
pipeline_inputs['image'] = [
pipeline_inputs['image'],
pipeline_inputs['image'],
]
output = pipe(**pipeline_inputs )
image = output.images
tuple_pipeline_inputs = self.get_dummy_inputs(device , pil_image=True )
tuple_pipeline_inputs['image'] = [
tuple_pipeline_inputs['image'],
tuple_pipeline_inputs['image'],
]
image_from_tuple = pipe(
**tuple_pipeline_inputs , return_dict=False , )[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
expected_slice = np.array(
[
0.9_9_9_7,
0.9_9_8_9,
0.0_0_0_8,
0.0_0_2_1,
0.9_9_6_0,
0.0_0_1_8,
0.0_0_1_4,
0.0_0_0_2,
0.9_9_3_3,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
device = torch.device('cpu' )
class DummyScheduler:
init_noise_sigma = 1
components = self.get_dummy_components()
pipe = self.pipeline_class(**components )
pipe = pipe.to(device )
pipe.set_progress_bar_config(disable=None )
generator = torch.Generator(device=device ).manual_seed(0 )
dtype = pipe.decoder.dtype
batch_size = 1
shape = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
decoder_latents = pipe.prepare_latents(
shape , dtype=dtype , device=device , generator=generator , latents=None , scheduler=DummyScheduler() )
shape = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
super_res_latents = pipe.prepare_latents(
shape , dtype=dtype , device=device , generator=generator , latents=None , scheduler=DummyScheduler() )
pipeline_inputs = self.get_dummy_inputs(device , pil_image=False )
img_out_a = pipe(
**pipeline_inputs , decoder_latents=decoder_latents , super_res_latents=super_res_latents ).images
pipeline_inputs = self.get_dummy_inputs(device , pil_image=False )
# Don't pass image, instead pass embedding
image = pipeline_inputs.pop('image' )
image_embeddings = pipe.image_encoder(image ).image_embeds
img_out_b = pipe(
**pipeline_inputs , decoder_latents=decoder_latents , super_res_latents=super_res_latents , image_embeddings=image_embeddings , ).images
# make sure passing image embeddings manually is identical
assert np.abs(img_out_a - img_out_b ).max() < 1E-4
@skip_mps
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
test_max_difference = torch_device == 'cpu'
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
expected_max_diff = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=test_max_difference , expected_max_diff=expected_max_diff )
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
test_max_difference = torch_device == 'cpu'
relax_max_difference = True
additional_params_copy_to_batched_inputs = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
self._test_inference_batch_single_identical(
test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs , )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
additional_params_copy_to_batched_inputs = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
batch_sizes = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=batch_sizes , additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs )
@skip_mps
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
input_image = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png' )
expected_image = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/unclip/karlo_v1_alpha_cat_variation_fp16.npy' )
pipeline = UnCLIPImageVariationPipeline.from_pretrained(
'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.float16 )
pipeline = pipeline.to(torch_device )
pipeline.set_progress_bar_config(disable=None )
generator = torch.Generator(device='cpu' ).manual_seed(0 )
output = pipeline(
input_image , generator=generator , output_type='np' , )
image = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(image , expected_image , 15 )
| 307 | 1 |
import colorsys
from PIL import Image # type: ignore
def lowerCAmelCase_ ( __UpperCAmelCase: float , __UpperCAmelCase: float , __UpperCAmelCase: int ) -> float:
UpperCamelCase__ : str = x
UpperCamelCase__ : Optional[Any] = y
for step in range(__UpperCAmelCase ): # noqa: B007
UpperCamelCase__ : Union[str, Any] = a * a - b * b + x
UpperCamelCase__ : Optional[int] = 2 * a * b + y
UpperCamelCase__ : Optional[int] = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
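# Editorial note: the return value normalizes the escape step into [0, 1];
# points that never diverge within max_step return 1 and are treated as inside
# the Mandelbrot set by the coloring functions below.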
def lowerCAmelCase_ ( __UpperCAmelCase: float ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def lowerCAmelCase_ ( __UpperCAmelCase: float ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(__UpperCAmelCase , 1 , 1 ) )
def get_image( image_width: int = 800 , image_height: int = 600 , figure_center_x: float = -0.6 , figure_center_y: float = 0 , figure_width: float = 3.2 , max_step: int = 50 , use_distance_color_coding: bool = True , ) -> Image.Image:
img = Image.new('''RGB''' , (image_width, image_height) )
pixels = img.load()
# loop through the image-coordinates
for image_x in range(image_width ):
for image_y in range(image_height ):
# determine the figure-coordinates based on the image-coordinates
figure_height = figure_width / image_width * image_height
figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
distance = get_distance(figure_x , figure_y , max_step )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
pixels[image_x, image_y] = get_color_coded_rgb(distance )
else:
pixels[image_x, image_y] = get_black_and_white_rgb(distance )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 201 |
def lowerCAmelCase_ ( __UpperCAmelCase: int = 100_0000 ) -> int:
UpperCamelCase__ : str = limit + 1
UpperCamelCase__ : List[str] = [0] * limit
for first_term in range(1 , __UpperCAmelCase ):
for n in range(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
UpperCamelCase__ : str = first_term + n / first_term
if common_difference % 4: # d must be divisble by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
frequency[n] += 1 # so z>0 and a>d ,also 4d<a
UpperCamelCase__ : Any = sum(1 for x in frequency[1:limit] if x == 10 )
return count
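# Derivation sketch (editorial): with x = y + d and z = y - d,
# x^2 - y^2 - z^2 = y * (4d - y) = n. For a fixed y (first_term above), n runs
# over multiples of y and first_term + n / first_term equals 4d; hence the
# divisible-by-4 check, plus y > d (so z > 0) and y < 4d (so n > 0).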
if __name__ == "__main__":
print(F'''{solution() = }''')
| 201 | 1 |
from itertools import product
from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros
def gen_gaussian_kernel( k_size , sigma ):
'''simple docstring'''
center = k_size // 2
x , y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
g = 1 / (2 * pi * sigma) * exp(-(square(x ) + square(y )) / (2 * square(sigma )) )
return g
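# Editorial note: the 1 / (2 * pi * sigma) factor differs from the textbook 2-D
# Gaussian normalization 1 / (2 * pi * sigma**2), and the kernel is not
# renormalized to sum to 1, so the filter only approximately preserves image
# brightness.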
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = image.shape[0], image.shape[1]
# dst image height and width
_lowerCAmelCase : str = height - k_size + 1
_lowerCAmelCase : int = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
_lowerCAmelCase : str = zeros((dst_height * dst_width, k_size * k_size) )
_lowerCAmelCase : str = 0
for i, j in product(range(__lowerCAmelCase ) , range(__lowerCAmelCase ) ):
_lowerCAmelCase : Tuple = ravel(image[i : i + k_size, j : j + k_size] )
_lowerCAmelCase : Optional[Any] = window
row += 1
# turn the kernel into shape(k*k, 1)
_lowerCAmelCase : Tuple = gen_gaussian_kernel(__lowerCAmelCase , __lowerCAmelCase )
_lowerCAmelCase : Tuple = ravel(__lowerCAmelCase )
# reshape and get the dst image
_lowerCAmelCase : Tuple = dot(__lowerCAmelCase , __lowerCAmelCase ).reshape(__lowerCAmelCase , __lowerCAmelCase ).astype(__lowerCAmelCase )
return dst
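

# A dependency-free sanity check (my own sketch; the helper name is illustrative
# and not part of the original script): the filter performs a "valid" im2col
# convolution, so the output shrinks by k_size - 1 in each dimension.
def _demo_gaussian_filter():
    impulse = zeros((5, 5))
    impulse[2, 2] = 255.0  # scaled unit impulse, so the uint8 cast keeps detail
    smoothed = gaussian_filter(impulse, 3, sigma=1)
    assert smoothed.shape == (3, 3)
    assert smoothed[1, 1] == smoothed.max()  # the kernel peak lands at the center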
if __name__ == "__main__":
# read original image
_snake_case = imread(R"../image_data/lena.jpg")
# turn image in gray scale value
_snake_case = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
_snake_case = gaussian_filter(gray, 3, sigma=1)
_snake_case = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow("gaussian filter with 3x3 mask", gaussianaxa)
imshow("gaussian filter with 5x5 mask", gaussianaxa)
waitKey()
| 350 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
def __init__( self, *__a, **__a):
'''simple docstring'''
warnings.warn(
"The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use VideoMAEImageProcessor instead.", __a, )
super().__init__(*__a, **__a)
| 300 | 0 |
import numpy as np


def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector
def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermitian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
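

# A quick standalone usage sketch (my own helper; the name is illustrative):
# power iteration only recovers the eigenpair of largest-magnitude eigenvalue,
# which is why the test above compares against the last eigenvalue from eigh.
def demo_power_iteration() -> None:
    diag = np.array([[2.0, 0.0], [0.0, 1.0]])
    eigen_value, eigen_vector = power_iteration(diag, np.array([1.0, 1.0]))
    assert abs(eigen_value - 2.0) <= 1e-6  # dominant eigenvalue of diag(2, 1)
    assert abs(abs(eigen_vector[0]) - 1.0) <= 1e-6  # aligns with the first axis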
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 326 |
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to the table[i + len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
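    # extra check (my own sketch): an unreachable target yields no decompositions,
    # i.e. an empty list rather than an error
    print(all_construct('''abc''', ['''x''', '''y''']))  # -> []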
| 326 | 1 |
'''simple docstring'''
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
print(alternative_string_arrange("AB", "XYZ"), end=" ")
| 360 |
'''simple docstring'''
import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n, prec=1_000):
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d /= 2
        exp += 1

    # n - 1 = d * (2 ** exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
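

# A deterministic cross-check for small inputs (my own sketch; trial division
# here is independent of the probabilistic test above):
def _trial_division_is_prime(n: int) -> bool:
    if n < 2:
        return False
    return all(n % k for k in range(2, int(n**0.5) + 1))


def _self_check(limit: int = 100) -> None:
    assert all(is_prime_big(i) == _trial_division_is_prime(i) for i in range(limit))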
if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 160 | 0 |
from __future__ import annotations

import numpy as np


def relu(vector: list[float]):
    # apply the rectified linear unit element-wise: max(0, x)
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
| 50 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('test' )
    else:
        parser = argparse.ArgumentParser('Accelerate test command' )
    parser.add_argument(
        '--config_file' , default=None , help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=test_command )
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['test_utils', 'scripts', 'test_script.py'] )
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = F"""--config_file={args.config_file} {script_name}"""
    cmd = ['accelerate-launch'] + test_args.split()
    result = execute_subprocess_async(cmd , env=os.environ.copy() )
    if result.returncode == 0:
        print('Test is a success! You are ready for your distributed training!' )


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args )


if __name__ == "__main__":
    main()
| 50 | 1 |
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """simple docstring"""
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
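    # extra illustration (my own sketch): merging two sorted lists preserves
    # global sorted order
    merged = merge_lists(SSL(test_data_odd), SSL(test_data_even))
    assert list(merged) == sorted(test_data_odd + test_data_even)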
| 124 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def __init__( self ,parent ,batch_size=7 ,num_channels=3 ,min_resolution=30 ,max_resolution=4_00 ,do_resize_and_center_crop=True ,size=None ,crop_pct=0.9 ,crop_size=None ,do_normalize=True ,image_mean=[0.5, 0.5, 0.5] ,image_std=[0.5, 0.5, 0.5] ,):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
def UpperCamelCase_ ( self : int ):
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case_ = PoolFormerImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self : Optional[Any] ):
        self.image_processor_tester = PoolFormerImageProcessingTester(self )
@property
def UpperCamelCase_ ( self : List[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self : Tuple ):
__A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A ,"do_resize_and_center_crop" ) )
self.assertTrue(hasattr(A ,"size" ) )
self.assertTrue(hasattr(A ,"crop_pct" ) )
self.assertTrue(hasattr(A ,"do_normalize" ) )
self.assertTrue(hasattr(A ,"image_mean" ) )
self.assertTrue(hasattr(A ,"image_std" ) )
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"shortest_edge": 30} )
self.assertEqual(image_processor.crop_size ,{"height": 30, "width": 30} )
__A = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size ,{"height": 84, "width": 84} )
def UpperCamelCase_ ( self : List[str] ):
pass
def UpperCamelCase_ ( self : Optional[int] ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A ,Image.Image )
# Test not batched input
__A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
__A = image_processing(A ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
def UpperCamelCase_ ( self : List[Any] ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,numpify=A )
for image in image_inputs:
self.assertIsInstance(A ,np.ndarray )
# Test not batched input
__A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
__A = image_processing(A ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
def UpperCamelCase_ ( self : List[Any] ):
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,torchify=A )
for image in image_inputs:
self.assertIsInstance(A ,torch.Tensor )
# Test not batched input
__A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
# Test batched
__A = image_processing(A ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) ,)
| 124 | 1 |
'''simple docstring'''
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
snake_case_ : Dict = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def A__ ( UpperCAmelCase_ ):
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
return max(metric_fn(UpperCAmelCase_ , UpperCAmelCase_ ) for gt in ground_truths )
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
_UpperCamelCase : int = [line.strip() for line in open(UpperCAmelCase_ , 'r' ).readlines()]
_UpperCamelCase : Union[str, Any] = []
if args.gold_data_mode == "qa":
_UpperCamelCase : List[str] = pd.read_csv(UpperCAmelCase_ , sep='\t' , header=UpperCAmelCase_ )
for answer_list in data[1]:
_UpperCamelCase : Any = ast.literal_eval(UpperCAmelCase_ )
answers.append(UpperCAmelCase_ )
else:
_UpperCamelCase : Optional[int] = [line.strip() for line in open(UpperCAmelCase_ , 'r' ).readlines()]
_UpperCamelCase : Union[str, Any] = [[reference] for reference in references]
_UpperCamelCase : List[str] = 0
for prediction, ground_truths in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
total += 1
em += metric_max_over_ground_truths(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
fa += metric_max_over_ground_truths(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCamelCase : Dict = 100.0 * em / total
_UpperCamelCase : int = 100.0 * fa / total
logger.info(f'F1: {fa:.2f}' )
logger.info(f'EM: {em:.2f}' )
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
_UpperCamelCase : Optional[int] = args.k
_UpperCamelCase : Dict = [line.strip() for line in open(UpperCAmelCase_ , 'r' ).readlines()]
_UpperCamelCase : str = [line.strip() for line in open(UpperCAmelCase_ , 'r' ).readlines()]
_UpperCamelCase : Union[str, Any] = 0
for hypo, reference in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
_UpperCamelCase : List[Any] = set(hypo.split('\t' )[:k] )
_UpperCamelCase : List[Any] = set(reference.split('\t' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
_UpperCamelCase : Any = 100.0 * em / total
logger.info(f'Precision@{k}: {em: .2f}' )
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
def strip_title(UpperCAmelCase_ ):
if title.startswith('"' ):
_UpperCamelCase : List[str] = title[1:]
if title.endswith('"' ):
_UpperCamelCase : Any = title[:-1]
return title
_UpperCamelCase : Any = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
UpperCAmelCase_ , return_tensors='pt' , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , )['input_ids'].to(args.device )
_UpperCamelCase : List[str] = rag_model.rag.question_encoder(UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = question_enc_outputs[0]
_UpperCamelCase : str = rag_model.retriever(
        UpperCAmelCase_ , question_enc_pool_output.cpu().detach().to(torch.float32 ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='pt' , )
_UpperCamelCase : Tuple = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
_UpperCamelCase : Optional[Any] = []
for docs in all_docs:
_UpperCamelCase : Any = [strip_title(UpperCAmelCase_ ) for title in docs['title']]
provenance_strings.append('\t'.join(UpperCAmelCase_ ) )
return provenance_strings
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
with torch.no_grad():
_UpperCamelCase : int = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
UpperCAmelCase_ , return_tensors='pt' , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ )
_UpperCamelCase : Union[str, Any] = inputs_dict.input_ids.to(args.device )
_UpperCamelCase : Optional[int] = inputs_dict.attention_mask.to(args.device )
_UpperCamelCase : str = rag_model.generate( # rag_model overwrites generate
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=UpperCAmelCase_ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
_UpperCamelCase : List[str] = rag_model.retriever.generator_tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
if args.print_predictions:
for q, a in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
logger.info('Q: {} - A: {}'.format(UpperCAmelCase_ , UpperCAmelCase_ ) )
return answers
def A__ ( ):
_UpperCamelCase : str = argparse.ArgumentParser()
parser.add_argument(
'--model_type' , choices=['rag_sequence', 'rag_token', 'bart'] , type=UpperCAmelCase_ , help=(
'RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'
' model_name_or_path'
) , )
parser.add_argument(
'--index_name' , default=UpperCAmelCase_ , choices=['exact', 'compressed', 'legacy'] , type=UpperCAmelCase_ , help='RAG model retriever type' , )
parser.add_argument(
'--index_path' , default=UpperCAmelCase_ , type=UpperCAmelCase_ , help='Path to the retrieval index' , )
parser.add_argument('--n_docs' , default=5 , type=UpperCAmelCase_ , help='Number of retrieved docs' )
parser.add_argument(
'--model_name_or_path' , default=UpperCAmelCase_ , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help='Path to pretrained checkpoints or model identifier from huggingface.co/models' , )
parser.add_argument(
'--eval_mode' , choices=['e2e', 'retrieval'] , default='e2e' , type=UpperCAmelCase_ , help=(
'Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'
' precision@k.'
) , )
parser.add_argument('--k' , default=1 , type=UpperCAmelCase_ , help='k for the precision@k calculation' )
parser.add_argument(
'--evaluation_set' , default=UpperCAmelCase_ , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help='Path to a file containing evaluation samples' , )
parser.add_argument(
'--gold_data_path' , default=UpperCAmelCase_ , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help='Path to a tab-separated file with gold samples' , )
parser.add_argument(
'--gold_data_mode' , default='qa' , type=UpperCAmelCase_ , choices=['qa', 'ans'] , help=(
'Format of the gold data file'
'qa - a single line in the following format: question [tab] answer_list'
'ans - a single line of the gold file contains the expected answer string'
) , )
parser.add_argument(
'--predictions_path' , type=UpperCAmelCase_ , default='predictions.txt' , help='Name of the predictions file, to be stored in the checkpoints directory' , )
parser.add_argument(
'--eval_all_checkpoints' , action='store_true' , help='Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number' , )
parser.add_argument(
'--eval_batch_size' , default=8 , type=UpperCAmelCase_ , help='Batch size per GPU/CPU for evaluation.' , )
parser.add_argument(
'--recalculate' , help='Recalculate predictions even if the prediction file exists' , action='store_true' , )
parser.add_argument(
'--num_beams' , default=4 , type=UpperCAmelCase_ , help='Number of beams to be used when generating answers' , )
parser.add_argument('--min_length' , default=1 , type=UpperCAmelCase_ , help='Min length of the generated answers' )
parser.add_argument('--max_length' , default=5_0 , type=UpperCAmelCase_ , help='Max length of the generated answers' )
parser.add_argument(
'--print_predictions' , action='store_true' , help='If True, prints predictions while evaluating.' , )
parser.add_argument(
'--print_docs' , action='store_true' , help='If True, prints docs retried while generating.' , )
_UpperCamelCase : Optional[Any] = parser.parse_args()
_UpperCamelCase : Tuple = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
return args
def A__ ( UpperCAmelCase_ ):
_UpperCamelCase : Optional[int] = {}
if args.model_type is None:
_UpperCamelCase : Tuple = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('rag' ):
_UpperCamelCase : str = RagTokenForGeneration if args.model_type == 'rag_token' else RagSequenceForGeneration
_UpperCamelCase : Dict = args.n_docs
if args.index_name is not None:
_UpperCamelCase : int = args.index_name
if args.index_path is not None:
_UpperCamelCase : int = args.index_path
else:
_UpperCamelCase : Any = BartForConditionalGeneration
_UpperCamelCase : int = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('Evaluate the following checkpoints: %s' , UpperCAmelCase_ )
_UpperCamelCase : Tuple = get_scores if args.eval_mode == 'e2e' else get_precision_at_k
_UpperCamelCase : Dict = evaluate_batch_eae if args.eval_mode == 'e2e' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('Calculating metrics based on an existing predictions file: {}'.format(args.predictions_path ) )
score_fn(UpperCAmelCase_ , args.predictions_path , args.gold_data_path )
continue
logger.info('***** Running evaluation for {} *****'.format(UpperCAmelCase_ ) )
logger.info(' Batch size = %d' , args.eval_batch_size )
logger.info(' Predictions will be stored under {}'.format(args.predictions_path ) )
if args.model_type.startswith('rag' ):
_UpperCamelCase : Dict = RagRetriever.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = model_class.from_pretrained(UpperCAmelCase_ , retriever=UpperCAmelCase_ , **UpperCAmelCase_ )
model.retriever.init_retrieval()
else:
_UpperCamelCase : int = model_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ )
model.to(args.device )
with open(args.evaluation_set , 'r' ) as eval_file, open(args.predictions_path , 'w' ) as preds_file:
_UpperCamelCase : str = []
for line in tqdm(UpperCAmelCase_ ):
questions.append(line.strip() )
if len(UpperCAmelCase_ ) == args.eval_batch_size:
_UpperCamelCase : List[str] = evaluate_batch_fn(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
preds_file.write('\n'.join(UpperCAmelCase_ ) + '\n' )
preds_file.flush()
_UpperCamelCase : Optional[Any] = []
if len(UpperCAmelCase_ ) > 0:
_UpperCamelCase : Optional[Any] = evaluate_batch_fn(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
preds_file.write('\n'.join(UpperCAmelCase_ ) )
preds_file.flush()
score_fn(UpperCAmelCase_ , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
snake_case_ : int = get_args()
main(args)
| 83 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
_SCREAMING_SNAKE_CASE : List[str] = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class A__ ( snake_case__ ):
"""simple docstring"""
    def __init__( self , *args , eval_examples=None , post_process_function=None , quant_trainer_args=None , **kwargs ):
        super().__init__(*args , **kwargs )
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 1_2_8  # default number of calibration samples
def a_ ( self , __snake_case=None ):
if calib_dataset is None and self.calib_dataset is None:
raise ValueError('''Trainer: calibration requires an calib_dataset.''' )
snake_case = calib_dataset if calib_dataset is not None else self.calib_dataset
snake_case = self._remove_unused_columns(__snake_case , description='''Calibration''' )
return DataLoader(
__snake_case , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=__snake_case , )
def a_ ( self , __snake_case=None ):
snake_case = self.train_dataset if calib_dataset is None else calib_dataset
snake_case = self.get_calib_dataloader(__snake_case )
snake_case = self.model
quant_trainer.configure_model(__snake_case , self.quant_trainer_args , calib=__snake_case )
model.eval()
quant_trainer.enable_calibration(__snake_case )
logger.info('''***** Running calibration *****''' )
logger.info(F''' Num examples = {self.calib_num}''' )
logger.info(F''' Batch size = {calib_dataloader.batch_size}''' )
for step, inputs in enumerate(__snake_case ):
# Prediction step
snake_case , snake_case , snake_case = self.prediction_step(__snake_case , __snake_case , prediction_loss_only=__snake_case )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(__snake_case , self.quant_trainer_args )
snake_case = model
def a_ ( self , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case = "eval" ):
snake_case = self.eval_dataset if eval_dataset is None else eval_dataset
snake_case = self.get_eval_dataloader(__snake_case )
snake_case = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
snake_case = self.compute_metrics
snake_case = None
snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
snake_case = eval_loop(
__snake_case , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__snake_case , )
finally:
snake_case = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
snake_case = self.post_process_function(__snake_case , __snake_case , output.predictions )
snake_case = self.compute_metrics(__snake_case )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
snake_case = metrics.pop(__snake_case )
self.log(__snake_case )
else:
snake_case = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
snake_case = self.callback_handler.on_evaluate(self.args , self.state , self.control , __snake_case )
return metrics
def a_ ( self , __snake_case , __snake_case , __snake_case=None , __snake_case = "test" ):
snake_case = self.get_test_dataloader(__snake_case )
# Temporarily disable metric computation, we will do it in the loop here.
snake_case = self.compute_metrics
snake_case = None
snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
snake_case = eval_loop(
__snake_case , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__snake_case , )
finally:
snake_case = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
snake_case = self.post_process_function(__snake_case , __snake_case , output.predictions , '''predict''' )
snake_case = self.compute_metrics(__snake_case )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
snake_case = metrics.pop(__snake_case )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__snake_case )
def a_ ( self , __snake_case="./" ):
snake_case = self.eval_dataset
snake_case = self.get_eval_dataloader(__snake_case )
snake_case = next(iter(__snake_case ) )
# saving device - to make it consistent
snake_case = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
# convert to tuple
snake_case = tuple(v.to(__snake_case ) for k, v in batch.items() )
logger.info('''Converting model to be onnx compatible''' )
from pytorch_quantization.nn import TensorQuantizer
snake_case = True
snake_case = self.model.to(__snake_case )
model.eval()
model.float()
snake_case = model.module if hasattr(__snake_case , '''module''' ) else model
quant_trainer.configure_model(__snake_case , self.quant_trainer_args )
snake_case = os.path.join(__snake_case , '''model.onnx''' )
logger.info(F'''exporting model to {output_model_file}''' )
snake_case = {0: '''batch_size''', 1: '''seq_len'''}
torch.onnx.export(
__snake_case , __snake_case , __snake_case , export_params=__snake_case , opset_version=1_3 , do_constant_folding=__snake_case , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={
'''input_ids''': axes,
'''attention_mask''': axes,
'''token_type_ids''': axes,
'''output_start_logits''': axes,
'''output_end_logits''': axes,
} , verbose=__snake_case , )
logger.info('''onnx export finished''' )
| 127 | 0 |
def solution( limit: int = 1_00_00_00 ) -> int:
    '''simple docstring'''
    phi = [i - 1 for i in range(limit + 1 )]

    for i in range(2 , limit + 1 ):
        if phi[i] == i - 1:  # i is prime
            for j in range(2 * i , limit + 1 , i ):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1] )
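

# A hand-checkable instance of the sieve above (my own sketch; the helper name is
# illustrative): for limit = 8 the totients phi(2..8) are 1, 2, 2, 4, 2, 6, 4,
# which sum to the 21 reduced proper fractions with denominator <= 8.
def demo_small_limit() -> None:
    assert solution(8) == 21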
if __name__ == "__main__":
print(solution())
| 250 |
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
snake_case__ : Dict = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
snake_case__ : Any = 'main'
# Default branch name
snake_case__ : Union[str, Any] = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'
# One particular commit (not the top of `main`)
snake_case__ : Optional[int] = 'aaaaaaa'
# This commit does not exist, so we should 404.
snake_case__ : int = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684'
# Sha-1 of config.json on the top of `main`, for checking purposes
snake_case__ : Any = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'
@contextlib.contextmanager
def _a ( ) -> Tuple:
'''simple docstring'''
print('''Welcome!''' )
yield
print('''Bye!''' )
@contextlib.contextmanager
def _a ( ) -> Optional[int]:
'''simple docstring'''
print('''Bonjour!''' )
yield
print('''Au revoir!''' )
class A_ ( unittest.TestCase ):
def _lowerCAmelCase (self :Any )-> Optional[Any]:
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec('''transformers''' ) is not None
class A_ ( unittest.TestCase ):
@unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
def _lowerCAmelCase (self :str , _UpperCamelCase :str )-> Optional[int]:
with ContextManagers([] ):
print('''Transformers are awesome!''' )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , '''Transformers are awesome!\n''' )
@unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
def _lowerCAmelCase (self :Optional[int] , _UpperCamelCase :List[Any] )-> Union[str, Any]:
with ContextManagers([context_en()] ):
print('''Transformers are awesome!''' )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , '''Welcome!\nTransformers are awesome!\nBye!\n''' )
@unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
def _lowerCAmelCase (self :int , _UpperCamelCase :Union[str, Any] )-> int:
with ContextManagers([context_fr(), context_en()] ):
print('''Transformers are awesome!''' )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , '''Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n''' )
@require_torch
def _lowerCAmelCase (self :int )-> str:
self.assertEqual(find_labels(_UpperCamelCase ) , ['''labels'''] )
self.assertEqual(find_labels(_UpperCamelCase ) , ['''labels''', '''next_sentence_label'''] )
self.assertEqual(find_labels(_UpperCamelCase ) , ['''start_positions''', '''end_positions'''] )
class A_ ( _lowerCamelCase ):
pass
self.assertEqual(find_labels(_UpperCamelCase ) , ['''labels'''] )
@require_tf
def _lowerCAmelCase (self :Any )-> str:
self.assertEqual(find_labels(_UpperCamelCase ) , ['''labels'''] )
self.assertEqual(find_labels(_UpperCamelCase ) , ['''labels''', '''next_sentence_label'''] )
self.assertEqual(find_labels(_UpperCamelCase ) , ['''start_positions''', '''end_positions'''] )
class A_ ( _lowerCamelCase ):
pass
self.assertEqual(find_labels(_UpperCamelCase ) , ['''labels'''] )
@require_flax
def _lowerCAmelCase (self :Optional[int] )-> Dict:
# Flax models don't have labels
self.assertEqual(find_labels(_UpperCamelCase ) , [] )
self.assertEqual(find_labels(_UpperCamelCase ) , [] )
self.assertEqual(find_labels(_UpperCamelCase ) , [] )
class A_ ( _lowerCamelCase ):
pass
self.assertEqual(find_labels(_UpperCamelCase ) , [] )
| 250 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clap''': [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapAudioConfig''',
'''ClapConfig''',
'''ClapTextConfig''',
],
'''processing_clap''': ['''ClapProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_clap'''] = [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapModel''',
'''ClapPreTrainedModel''',
'''ClapTextModel''',
'''ClapTextModelWithProjection''',
'''ClapAudioModel''',
'''ClapAudioModelWithProjection''',
]
    _import_structure['''feature_extraction_clap'''] = ['''ClapFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 64 |
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """simple docstring"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"}
    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"
    url = F"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += F"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """simple docstring"""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """simple docstring"""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """simple docstring"""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, F"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
| 64 | 1 |
"""simple docstring"""
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C


def electric_conductivity(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError('''You cannot supply more or less than 2 values''')
    elif conductivity < 0:
        raise ValueError('''Conductivity cannot be negative''')
    elif electron_conc < 0:
        raise ValueError('''Electron concentration cannot be negative''')
    elif mobility < 0:
        raise ValueError('''mobility cannot be negative''')
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
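    # worked instance (my own sketch): with electron_conc = 1e20 and mobility = 1e2,
    # the missing value is mobility * electron_conc * ELECTRON_CHARGE = 1.6021e3
    print(electric_conductivity(conductivity=0, electron_conc=1e20, mobility=1e2))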
| 68 |
"""simple docstring"""
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _UpperCAmelCase ( a ,unittest.TestCase ):
'''simple docstring'''
a__ =TransfoXLTokenizer
a__ =False
a__ =False
def __lowerCAmelCase ( self ) -> List[str]:
super().setUp()
_UpperCAmelCase : Dict = [
'''<unk>''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''unwanted''',
'''wa''',
'''un''',
'''running''',
''',''',
'''low''',
'''l''',
]
_UpperCAmelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __lowerCAmelCase ( self , **A ) -> Dict:
_UpperCAmelCase : Union[str, Any] = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **A )
def __lowerCAmelCase ( self , A ) -> str:
_UpperCAmelCase : str = '''<unk> UNwanted , running'''
_UpperCAmelCase : Union[str, Any] = '''<unk> unwanted, running'''
return input_text, output_text
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : Any = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=A )
_UpperCAmelCase : Union[str, Any] = tokenizer.tokenize('''<unk> UNwanted , running''' )
self.assertListEqual(A , ['''<unk>''', '''unwanted''', ''',''', '''running'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [0, 4, 8, 7] )
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : str = TransfoXLTokenizer(lower_case=A )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_UpperCAmelCase : Tuple = TransfoXLTokenizer(lower_case=A )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __lowerCAmelCase ( self ) -> int:
_UpperCAmelCase : Tuple = TransfoXLTokenizer(lower_case=A )
_UpperCAmelCase : Optional[Any] = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'''
_UpperCAmelCase : Optional[Any] = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
self.assertListEqual(tokenizer.tokenize(A ) , A )
self.assertEqual(tokenizer.convert_tokens_to_string(A ) , A )
def __lowerCAmelCase ( self ) -> Optional[int]:
_UpperCAmelCase : str = self.get_tokenizer()
_UpperCAmelCase : List[Any] = len(A )
tokenizer.add_tokens(['''new1''', '''new2'''] )
tokenizer.move_added_token('''new1''' , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(A ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('''new1''' ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , '''new1''' )
| 68 | 1 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
A : List[Any] = logging.get_logger(__name__)
A : int = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
A : Dict = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
A : Any = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
A : Tuple = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
A : Optional[Any] = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_1_2,
'facebook/dpr-ctx_encoder-multiset-base': 5_1_2,
}
A : Tuple = {
'facebook/dpr-question_encoder-single-nq-base': 5_1_2,
'facebook/dpr-question_encoder-multiset-base': 5_1_2,
}
A : List[Any] = {
'facebook/dpr-reader-single-nq-base': 5_1_2,
'facebook/dpr-reader-multiset-base': 5_1_2,
}
A : Optional[int] = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
A : str = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
A : Union[str, Any] = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class __A( a ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
snake_case_ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
snake_case_ = DPRContextEncoderTokenizer
class __A( a ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
snake_case_ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
snake_case_ = DPRQuestionEncoderTokenizer
A : Optional[Any] = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
A : Optional[Any] = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
A : Tuple = R'\n    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n    with the format:\n\n    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n    Args:\n        questions (`str` or `List[str]`):\n            The questions to be encoded. You can specify one question for many passages. In this case, the question\n            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n            `titles` or `texts`.\n        titles (`str` or `List[str]`):\n            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n        texts (`str` or `List[str]`):\n            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n            Activates and controls padding. Accepts the following values:\n\n            - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n              if provided).\n            - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided.\n            - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n              lengths).\n        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n            Activates and controls truncation. Accepts the following values:\n\n            - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n              the maximum acceptable input length for the model if that argument is not provided. This will truncate\n              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n              of pairs) is provided.\n            - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the first\n              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the\n              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n              greater than the model maximum admissible input size).\n        max_length (`int`, *optional*):\n                Controls the maximum length to use by one of the truncation/padding parameters.\n\n                If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n                is required by one of the truncation/padding parameters. If the model has no specific maximum input\n                length (like XLNet) truncation/padding to a maximum length will be deactivated.\n        return_tensors (`str` or [`~utils.TensorType`], *optional*):\n                If set, will return tensors instead of list of python integers. Acceptable values are:\n\n                - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n                - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n                - `\'np\'`: Return Numpy `np.ndarray` objects.\n        return_attention_mask (`bool`, *optional*):\n            Whether or not to return the attention mask. If not set, will return the attention mask according to the\n            specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n            [What are attention masks?](../glossary#attention-mask)\n\n    Return:\n        `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n        - `input_ids`: List of token ids to be fed to a model.\n        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n    '
@add_start_docstrings(a )
class __A:
    def __call__( self , questions , titles = None , texts = None , padding = False , truncation = False , max_length = None , return_tensors = None , return_attention_mask = None , **kwargs , ) -> BatchEncoding:
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
_snake_case , padding=_snake_case , truncation=_snake_case , max_length=_snake_case , return_tensors=_snake_case , return_attention_mask=_snake_case , **_snake_case , )
elif titles is None or texts is None:
__a = titles if texts is None else texts
return super().__call__(
_snake_case , _snake_case , padding=_snake_case , truncation=_snake_case , max_length=_snake_case , return_tensors=_snake_case , return_attention_mask=_snake_case , **_snake_case , )
__a = titles if not isinstance(_snake_case , _snake_case ) else [titles]
__a = texts if not isinstance(_snake_case , _snake_case ) else [texts]
__a = len(_snake_case )
__a = questions if not isinstance(_snake_case , _snake_case ) else [questions] * n_passages
assert len(_snake_case ) == len(
_snake_case ), F"""There should be as many titles than texts but got {len(_snake_case )} titles and {len(_snake_case )} texts."""
__a = super().__call__(_snake_case , _snake_case , padding=_snake_case , truncation=_snake_case )['''input_ids''']
__a = super().__call__(_snake_case , add_special_tokens=_snake_case , padding=_snake_case , truncation=_snake_case )['''input_ids''']
__a = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_snake_case , _snake_case )
]
}
if return_attention_mask is not False:
__a = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
__a = attention_mask
return self.pad(_snake_case , padding=_snake_case , max_length=_snake_case , return_tensors=_snake_case )
    def decode_best_spans( self , reader_input , reader_output , num_spans = 16 , max_answer_length = 64 , num_spans_per_passage = 4 , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
__a = reader_input['''input_ids''']
__a , __a , __a = reader_output[:3]
__a = len(_snake_case )
__a = sorted(range(_snake_case ) , reverse=_snake_case , key=relevance_logits.__getitem__ )
__a = []
for doc_id in sorted_docs:
__a = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
__a = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
__a = sequence_ids.index(self.pad_token_id )
else:
__a = len(_snake_case )
__a = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_snake_case , top_spans=_snake_case , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_snake_case , start_index=_snake_case , end_index=_snake_case , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_snake_case ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
__a = []
for start_index, start_score in enumerate(_snake_case ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
scores = sorted(scores , key=lambda x : x[1] , reverse=True )
__a = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, F"""Wrong span indices: [{start_index}:{end_index}]"""
__a = end_index - start_index + 1
assert length <= max_answer_length, F"""Span is too long: {length} > {max_answer_length}"""
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_snake_case ) == top_spans:
break
return chosen_span_intervals
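# The greedy selection above is easier to follow in isolation. Below is a
# minimal, self-contained sketch of the same procedure: score every candidate
# span, sort by score, and keep the highest-scoring spans that neither contain
# nor are contained in an already-chosen span. The toy logits are made up
# purely for illustration.
def select_best_spans(start_logits, end_logits, max_answer_length, top_spans):
    scores = []
    for start_index, start_score in enumerate(start_logits):
        window = end_logits[start_index : start_index + max_answer_length]
        for answer_length, end_score in enumerate(window):
            scores.append(((start_index, start_index + answer_length), start_score + end_score))
    scores = sorted(scores, key=lambda item: item[1], reverse=True)
    chosen = []
    for (start_index, end_index), _score in scores:
        if any(
            start_index <= prev_start <= prev_end <= end_index
            or prev_start <= start_index <= end_index <= prev_end
            for (prev_start, prev_end) in chosen
        ):
            continue  # skip spans nested with an already-chosen span
        chosen.append((start_index, end_index))
        if len(chosen) == top_spans:
            break
    return chosen
# Example: select_best_spans([0.1, 0.9, 0.2], [0.3, 0.1, 0.8],
#                            max_answer_length=2, top_spans=2) == [(1, 2), (0, 0)]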
@add_end_docstrings(a )
class __A( a , a ):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
model_input_names = ['''input_ids''', '''attention_mask''']
slow_tokenizer_class = DPRReaderTokenizer
| 6 |
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class __A( unittest.TestCase ):
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls ) -> Union[str, Any]:
'''simple docstring'''
__a = TOKEN
HfFolder.save_token(_snake_case )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls ) -> Union[str, Any]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
__a = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
__a = FlaxBertModel(_snake_case )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""" )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_snake_case , repo_id='''test-model-flax''' , push_to_hub=_snake_case , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""" )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
__a = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
__a = FlaxBertModel(_snake_case )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
_snake_case , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_snake_case , use_auth_token=self._token )
__a = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
__a = flatten_dict(unfreeze(model.params ) )
__a = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__a = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_snake_case , 1E-3 , msg=F"""{key} not identical""" )
def check_models_equal( model_a , model_b ) -> bool:
    models_are_equal = True
    flat_params_a = flatten_dict(model_a.params )
    flat_params_b = flatten_dict(model_b.params )
    for key in flat_params_a.keys():
        if np.sum(np.abs(flat_params_a[key] - flat_params_b[key] ) ) > 1e-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class __A( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
__a = FlaxBertModel(_snake_case )
__a = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_snake_case , _snake_case ) )
with self.assertRaises(_snake_case ):
__a = FlaxBertModel.from_pretrained(_snake_case )
__a = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case )
self.assertTrue(check_models_equal(_snake_case , _snake_case ) )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
__a = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
__a = FlaxBertModel(_snake_case )
__a = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_snake_case , _snake_case ) , max_shard_size='''10KB''' )
with self.assertRaises(_snake_case ):
__a = FlaxBertModel.from_pretrained(_snake_case )
__a = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case )
self.assertTrue(check_models_equal(_snake_case , _snake_case ) )
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
__a = '''bert'''
__a = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(_snake_case ):
__a = FlaxBertModel.from_pretrained(_snake_case )
__a = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case )
self.assertIsNotNone(_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
__a = '''bert'''
__a = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(_snake_case ):
__a = FlaxBertModel.from_pretrained(_snake_case )
__a = FlaxBertModel.from_pretrained(_snake_case , subfolder=_snake_case )
self.assertIsNotNone(_snake_case )
| 6 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_nllb"] = ['''NllbTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_nllb_fast"] = ['''NllbTokenizerFast''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 73 |
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=3 , lowerCamelCase__=3_2 , lowerCamelCase__=3 , lowerCamelCase__=1_0 , lowerCamelCase__=[1_0, 2_0, 3_0, 4_0] , lowerCamelCase__=[1, 1, 2, 1] , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__="relu" , lowerCamelCase__=3 , lowerCamelCase__=None , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = image_size
_lowerCamelCase = num_channels
_lowerCamelCase = embeddings_size
_lowerCamelCase = hidden_sizes
_lowerCamelCase = depths
_lowerCamelCase = is_training
_lowerCamelCase = use_labels
_lowerCamelCase = hidden_act
_lowerCamelCase = num_labels
_lowerCamelCase = scope
_lowerCamelCase = len(lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase = self.get_config()
return config, pixel_values
def snake_case__ ( self ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = FlaxRegNetModel(config=lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = self.num_labels
_lowerCamelCase = FlaxRegNetForImageClassification(config=lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase = config_and_inputs
_lowerCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class lowerCamelCase_( A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Union[str, Any] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
lowercase__ : List[Any] = False
lowercase__ : Tuple = False
lowercase__ : Union[str, Any] = False
def snake_case__ ( self ):
_lowerCamelCase = FlaxRegNetModelTester(self )
_lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def snake_case__ ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case__ ( self ):
return
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def snake_case__ ( self ):
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase = [*signature.parameters.keys()]
_lowerCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def snake_case__ ( self ):
def check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
_lowerCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCamelCase = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase__ ) , expected_num_stages + 1 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model_class(lowerCamelCase__ )
@jax.jit
def model_jitted(lowerCamelCase__ , **lowerCamelCase__ ):
return model(pixel_values=lowerCamelCase__ , **lowerCamelCase__ )
with self.subTest('''JIT Enabled''' ):
_lowerCamelCase = model_jitted(**lowerCamelCase__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_lowerCamelCase = model_jitted(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) )
for jitted_output, output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_flax
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case__ ( self ):
return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None
@slow
def snake_case__ ( self ):
_lowerCamelCase = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' )
_lowerCamelCase = self.default_image_processor
_lowerCamelCase = prepare_img()
_lowerCamelCase = image_processor(images=lowerCamelCase__ , return_tensors='''np''' )
_lowerCamelCase = model(**lowerCamelCase__ )
# verify the logits
_lowerCamelCase = (1, 1_0_0_0)
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
_lowerCamelCase = jnp.array([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 ) )
| 73 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowerCAmelCase ( a__ , a__ , a__ , unittest.TestCase ):
UpperCamelCase = StableDiffusionInpaintPipeline
UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCamelCase = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
UpperCamelCase = frozenset([] )
def _lowerCamelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
torch.manual_seed(0)
_UpperCAmelCase = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__lowerCAmelCase , )
_UpperCAmelCase = PNDMScheduler(skip_prk_steps=__lowerCAmelCase)
torch.manual_seed(0)
_UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0)
_UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='gelu' , projection_dim=5_12 , )
_UpperCAmelCase = CLIPTextModel(__lowerCAmelCase)
_UpperCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
_UpperCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def _lowerCamelCase ( self : List[str] , A : Any , A : Optional[int]=0) -> Any:
"""simple docstring"""
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowerCAmelCase)).to(__lowerCAmelCase)
_UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1)[0]
_UpperCAmelCase = Image.fromarray(np.uint8(__lowerCAmelCase)).convert('RGB').resize((64, 64))
_UpperCAmelCase = Image.fromarray(np.uint8(image + 4)).convert('RGB').resize((64, 64))
if str(__lowerCAmelCase).startswith('mps'):
_UpperCAmelCase = torch.manual_seed(__lowerCAmelCase)
else:
_UpperCAmelCase = torch.Generator(device=__lowerCAmelCase).manual_seed(__lowerCAmelCase)
_UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': init_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def _lowerCamelCase ( self : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = StableDiffusionInpaintPipeline(**__lowerCAmelCase)
_UpperCAmelCase = sd_pipe.to(__lowerCAmelCase)
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase)
_UpperCAmelCase = self.get_dummy_inputs(__lowerCAmelCase)
_UpperCAmelCase = sd_pipe(**__lowerCAmelCase).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase = np.array([0.4_7_2_7, 0.5_7_3_5, 0.3_9_4_1, 0.5_4_4_6, 0.5_9_2_6, 0.4_3_9_4, 0.5_0_6_2, 0.4_6_5_4, 0.4_4_7_6])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def _lowerCamelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : Dict) -> int:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Dict) -> str:
"""simple docstring"""
_UpperCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
_UpperCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
_UpperCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench.npy')
_UpperCAmelCase = '''stabilityai/stable-diffusion-2-inpainting'''
_UpperCAmelCase = StableDiffusionInpaintPipeline.from_pretrained(__lowerCAmelCase , safety_checker=__lowerCAmelCase)
pipe.to(__lowerCAmelCase)
pipe.set_progress_bar_config(disable=__lowerCAmelCase)
pipe.enable_attention_slicing()
_UpperCAmelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
_UpperCAmelCase = torch.manual_seed(0)
_UpperCAmelCase = pipe(
prompt=__lowerCAmelCase , image=__lowerCAmelCase , mask_image=__lowerCAmelCase , generator=__lowerCAmelCase , output_type='np' , )
_UpperCAmelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image).max() < 9E-3
def _lowerCamelCase ( self : str) -> str:
"""simple docstring"""
_UpperCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
_UpperCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
_UpperCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench_fp16.npy')
_UpperCAmelCase = '''stabilityai/stable-diffusion-2-inpainting'''
_UpperCAmelCase = StableDiffusionInpaintPipeline.from_pretrained(
__lowerCAmelCase , torch_dtype=torch.float16 , safety_checker=__lowerCAmelCase , )
pipe.to(__lowerCAmelCase)
pipe.set_progress_bar_config(disable=__lowerCAmelCase)
pipe.enable_attention_slicing()
_UpperCAmelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
_UpperCAmelCase = torch.manual_seed(0)
_UpperCAmelCase = pipe(
prompt=__lowerCAmelCase , image=__lowerCAmelCase , mask_image=__lowerCAmelCase , generator=__lowerCAmelCase , output_type='np' , )
_UpperCAmelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image).max() < 5E-1
def _lowerCamelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
_UpperCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
_UpperCAmelCase = '''stabilityai/stable-diffusion-2-inpainting'''
_UpperCAmelCase = PNDMScheduler.from_pretrained(__lowerCAmelCase , subfolder='scheduler')
_UpperCAmelCase = StableDiffusionInpaintPipeline.from_pretrained(
__lowerCAmelCase , safety_checker=__lowerCAmelCase , scheduler=__lowerCAmelCase , torch_dtype=torch.float16 , )
pipe.to(__lowerCAmelCase)
pipe.set_progress_bar_config(disable=__lowerCAmelCase)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
_UpperCAmelCase = '''Face of a yellow cat, high resolution, sitting on a park bench'''
_UpperCAmelCase = torch.manual_seed(0)
_UpperCAmelCase = pipe(
prompt=__lowerCAmelCase , image=__lowerCAmelCase , mask_image=__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=2 , output_type='np' , )
_UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.6_5 * 10**9
| 339 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase ( a__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = CTRLTokenizer
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def _lowerCAmelCase( self ) -> Dict:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase__ : Optional[Any] = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
lowercase__ : str = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
lowercase__ : Tuple = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
lowercase__ : Optional[Any] = {'''unk_token''': '''<unk>'''}
lowercase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__lowerCAmelCase ) )
def _lowerCAmelCase( self , **__lowerCAmelCase ) -> List[str]:
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def _lowerCAmelCase( self , __lowerCAmelCase ) -> List[str]:
lowercase__ : List[str] = '''adapt react readapt apt'''
lowercase__ : Union[str, Any] = '''adapt react readapt apt'''
return input_text, output_text
def _lowerCAmelCase( self ) -> Optional[Any]:
lowercase__ : Union[str, Any] = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase__ : Optional[Any] = '''adapt react readapt apt'''
lowercase__ : Dict = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
lowercase__ : Union[str, Any] = tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
lowercase__ : int = tokens + [tokenizer.unk_token]
lowercase__ : List[str] = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase )
| 198 | 0 |
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
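# Each entry maps ``num_image_embeds`` to the (rows, cols) grid used by the
# adaptive average pool below; rows * cols always equals the key, so the
# backbone feature map is reduced to exactly that many image embeddings.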
class lowerCAmelCase ( nn.Module ):
def __init__( self : List[Any] , UpperCAmelCase : List[Any] ) -> List[Any]:
super().__init__()
lowerCamelCase__ : Union[str, Any] = torchvision.models.resnet152(pretrained=UpperCAmelCase )
lowerCamelCase__ : Any = list(model.children() )[:-2]
lowerCamelCase__ : int = nn.Sequential(*UpperCAmelCase )
lowerCamelCase__ : str = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds] )
def A_ ( self : List[Any] , UpperCAmelCase : Union[str, Any] ) -> str:
# Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
lowerCamelCase__ : Union[str, Any] = self.pool(self.model(UpperCAmelCase ) )
lowerCamelCase__ : Optional[Any] = torch.flatten(UpperCAmelCase , start_dim=2 )
lowerCamelCase__ : List[str] = out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
class lowerCAmelCase ( __UpperCamelCase ):
def __init__( self : Union[str, Any] , UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Tuple ) -> Union[str, Any]:
lowerCamelCase__ : Dict = [json.loads(UpperCAmelCase ) for l in open(UpperCAmelCase )]
lowerCamelCase__ : Optional[int] = os.path.dirname(UpperCAmelCase )
lowerCamelCase__ : List[Any] = tokenizer
lowerCamelCase__ : Optional[int] = labels
lowerCamelCase__ : Optional[int] = len(UpperCAmelCase )
lowerCamelCase__ : Dict = max_seq_length
lowerCamelCase__ : List[str] = transforms
def __len__( self : Optional[int] ) -> Optional[Any]:
return len(self.data )
def __getitem__( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
lowerCamelCase__ : Optional[Any] = torch.LongTensor(self.tokenizer.encode(self.data[index]['text'] , add_special_tokens=UpperCAmelCase ) )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = sentence[0], sentence[1:-1], sentence[-1]
lowerCamelCase__ : Optional[Any] = sentence[: self.max_seq_length]
lowerCamelCase__ : Union[str, Any] = torch.zeros(self.n_classes )
lowerCamelCase__ : Any = 1
lowerCamelCase__ : List[Any] = Image.open(os.path.join(self.data_dir , self.data[index]['img'] ) ).convert('RGB' )
lowerCamelCase__ : Dict = self.transforms(UpperCAmelCase )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def A_ ( self : Union[str, Any] ) -> Tuple:
lowerCamelCase__ : Optional[Any] = Counter()
for row in self.data:
label_freqs.update(row['label'] )
return label_freqs
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> int:
lowerCamelCase__ : str = [len(row['sentence'] ) for row in batch]
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = len(_UpperCAmelCase ), max(_UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = torch.zeros(_UpperCAmelCase , _UpperCAmelCase , dtype=torch.long )
lowerCamelCase__ : Dict = torch.zeros(_UpperCAmelCase , _UpperCAmelCase , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(_UpperCAmelCase , _UpperCAmelCase ) ):
lowerCamelCase__ : int = input_row['sentence']
lowerCamelCase__ : Optional[Any] = 1
lowerCamelCase__ : List[str] = torch.stack([row['image'] for row in batch] )
lowerCamelCase__ : Dict = torch.stack([row['label'] for row in batch] )
lowerCamelCase__ : Optional[int] = torch.stack([row['image_start_token'] for row in batch] )
lowerCamelCase__ : str = torch.stack([row['image_end_token'] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
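# Collation sketch: sentences of lengths 3 and 5 pad out to a 2 x 5 LongTensor,
# left-aligned, with a 0/1 mask marking the real tokens, e.g.
#
#     text_tensor = [[t0, t1, t2, 0, 0],    mask_tensor = [[1, 1, 1, 0, 0],
#                    [s0, s1, s2, s3, s4]]                 [1, 1, 1, 1, 1]]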
def SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def SCREAMING_SNAKE_CASE ( ) -> Tuple:
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ),
] )
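# A minimal, self-contained shape walk-through of the image encoder's forward
# pass above. The random tensor stands in for the ResNet backbone output, and
# ``num_image_embeds = 3`` is an arbitrary choice for illustration.
if __name__ == "__main__":
    fake_backbone_out = torch.randn(2, 2048, 7, 7)  # Bx2048x7x7
    pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[3])  # -> Bx2048x3x1
    out = torch.flatten(pool(fake_backbone_out), start_dim=2)  # Bx2048x3
    out = out.transpose(1, 2).contiguous()  # Bx3x2048
    assert tuple(out.shape) == (2, 3, 2048)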
| 45 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader ( yaml.SafeLoader ):
def A_ ( self : List[str] , UpperCAmelCase : Dict ) -> Optional[Any]:
lowerCamelCase__ : List[Any] = [self.constructed_objects[key_node] for key_node, _ in node.value]
lowerCamelCase__ : str = [tuple(UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else key for key in keys]
lowerCamelCase__ : Optional[Any] = Counter(UpperCAmelCase )
lowerCamelCase__ : Tuple = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(F"""Got duplicate yaml keys: {duplicate_keys}""" )
def A_ ( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict=False ) -> int:
lowerCamelCase__ : int = super().construct_mapping(UpperCAmelCase , deep=UpperCAmelCase )
self._check_no_duplicates_on_constructed_node(UpperCAmelCase )
return mapping
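# Example of the check above: loading "a: 1\na: 2" through this loader raises
# TypeError("Got duplicate yaml keys: ['a']"), whereas plain yaml.safe_load
# would silently keep only the last value.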
def _split_yaml_from_readme( readme_content: str ) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines() )
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index('---' ) + 1
        yamlblock = '\n'.join(full_content[1:sep_idx] )
        return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
    return None, "\n".join(full_content )
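# Worked example of the split above, on toy README content:
#
#     yaml_block, content = _split_yaml_from_readme("---\nlicense: mit\n---\n# My dataset")
#     # yaml_block == "license: mit"
#     # content == "# My dataset"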
class DatasetMetadata ( dict ):
# class attributes
UpperCAmelCase__ = {"""train_eval_index"""} # train-eval-index in the YAML metadata
@classmethod
def A_ ( cls : str , UpperCAmelCase : Path ) -> "DatasetMetadata":
with open(UpperCAmelCase , encoding='utf-8' ) as readme_file:
lowerCamelCase__ , lowerCamelCase__ : List[Any] = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(UpperCAmelCase )
else:
return cls()
def A_ ( self : List[str] , UpperCAmelCase : Path ) -> Any:
if path.exists():
with open(UpperCAmelCase , encoding='utf-8' ) as readme_file:
lowerCamelCase__ : Any = readme_file.read()
else:
lowerCamelCase__ : Any = None
lowerCamelCase__ : List[str] = self._to_readme(UpperCAmelCase )
with open(UpperCAmelCase , 'w' , encoding='utf-8' ) as readme_file:
readme_file.write(UpperCAmelCase )
def A_ ( self : Union[str, Any] , UpperCAmelCase : Optional[str] = None ) -> str:
if readme_content is not None:
lowerCamelCase__ , lowerCamelCase__ : int = _split_yaml_from_readme(UpperCAmelCase )
lowerCamelCase__ : Dict = '---\n' + self.to_yaml_string() + '---\n' + content
else:
lowerCamelCase__ : Optional[Any] = '---\n' + self.to_yaml_string() + '---\n'
return full_content
@classmethod
def A_ ( cls : Union[str, Any] , UpperCAmelCase : str ) -> "DatasetMetadata":
lowerCamelCase__ : Any = yaml.load(UpperCAmelCase , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
lowerCamelCase__ : Tuple = {
(key.replace('-' , '_' ) if key.replace('-' , '_' ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**UpperCAmelCase )
def A_ ( self : Optional[Any] ) -> str:
return yaml.safe_dump(
{
(key.replace('_' , '-' ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=UpperCAmelCase , allow_unicode=UpperCAmelCase , encoding='utf-8' , ).decode('utf-8' )
_UpperCAmelCase : Tuple = {
"""image-classification""": [],
"""translation""": [],
"""image-segmentation""": [],
"""fill-mask""": [],
"""automatic-speech-recognition""": [],
"""token-classification""": [],
"""sentence-similarity""": [],
"""audio-classification""": [],
"""question-answering""": [],
"""summarization""": [],
"""zero-shot-classification""": [],
"""table-to-text""": [],
"""feature-extraction""": [],
"""other""": [],
"""multiple-choice""": [],
"""text-classification""": [],
"""text-to-image""": [],
"""text2text-generation""": [],
"""zero-shot-image-classification""": [],
"""tabular-classification""": [],
"""tabular-regression""": [],
"""image-to-image""": [],
"""tabular-to-text""": [],
"""unconditional-image-generation""": [],
"""text-retrieval""": [],
"""text-to-speech""": [],
"""object-detection""": [],
"""audio-to-audio""": [],
"""text-generation""": [],
"""conversational""": [],
"""table-question-answering""": [],
"""visual-question-answering""": [],
"""image-to-text""": [],
"""reinforcement-learning""": [],
"""voice-activity-detection""": [],
"""time-series-forecasting""": [],
"""document-question-answering""": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
_UpperCAmelCase : Tuple = ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""")
ap.add_argument("""readme_filepath""")
_UpperCAmelCase : str = ap.parse_args()
_UpperCAmelCase : Optional[int] = Path(args.readme_filepath)
_UpperCAmelCase : Union[str, Any] = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 45 | 1 |
from ...configuration_utils import PretrainedConfig
A__ = {
"""google/tapas-base-finetuned-sqa""": (
"""https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wtq""": (
"""https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wikisql-supervised""": (
"""https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-tabfact""": (
"""https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"""
),
}
class TapasConfig( PretrainedConfig ):
model_type = '''tapas'''
def __init__( self , _snake_case=30522 , _snake_case=768 , _snake_case=12 , _snake_case=12 , _snake_case=3072 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=1024 , _snake_case=[3, 256, 256, 2, 256, 256, 10] , _snake_case=0.02 , _snake_case=1e-12 , _snake_case=0 , _snake_case=10.0 , _snake_case=0 , _snake_case=1.0 , _snake_case=None , _snake_case=1.0 , _snake_case=False , _snake_case=None , _snake_case=1.0 , _snake_case=1.0 , _snake_case=False , _snake_case=False , _snake_case="ratio" , _snake_case=None , _snake_case=None , _snake_case=64 , _snake_case=32 , _snake_case=False , _snake_case=True , _snake_case=False , _snake_case=False , _snake_case=True , _snake_case=False , _snake_case=None , _snake_case=None , **_snake_case , ):
"""simple docstring"""
super().__init__(pad_token_id=_snake_case , **_snake_case )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = hidden_act
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_sizes
_lowerCAmelCase = initializer_range
_lowerCAmelCase = layer_norm_eps
# Fine-tuning task hyperparameters
_lowerCAmelCase = positive_label_weight
_lowerCAmelCase = num_aggregation_labels
_lowerCAmelCase = aggregation_loss_weight
_lowerCAmelCase = use_answer_as_supervision
_lowerCAmelCase = answer_loss_importance
_lowerCAmelCase = use_normalized_answer_loss
_lowerCAmelCase = huber_loss_delta
_lowerCAmelCase = temperature
_lowerCAmelCase = aggregation_temperature
_lowerCAmelCase = use_gumbel_for_cells
_lowerCAmelCase = use_gumbel_for_aggregation
_lowerCAmelCase = average_approximation_function
_lowerCAmelCase = cell_selection_preference
_lowerCAmelCase = answer_loss_cutoff
_lowerCAmelCase = max_num_rows
_lowerCAmelCase = max_num_columns
_lowerCAmelCase = average_logits_per_cell
_lowerCAmelCase = select_one_column
_lowerCAmelCase = allow_empty_column_selection
_lowerCAmelCase = init_cell_selection_weights_to_zero
_lowerCAmelCase = reset_position_index_per_cell
_lowerCAmelCase = disable_per_token_loss
# Aggregation hyperparameters
_lowerCAmelCase = aggregation_labels
_lowerCAmelCase = no_aggregation_label_index
if isinstance(self.aggregation_labels , dict ):
self.aggregation_labels = {int(k ): v for k, v in aggregation_labels.items()}
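# Usage sketch (hypothetical values): constructing TapasConfig with
# ``num_aggregation_labels=4`` and ``use_answer_as_supervision=True`` mirrors
# a WTQ-style fine-tuning setup, and a string-keyed ``aggregation_labels``
# mapping such as ``{"0": "NONE"}`` is normalised to int keys by the
# ``__init__`` above.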
| 82 |
def solution( length: int = 50 ) -> int:
    """Count the tilings of a row of ``length`` units using unit tiles and
    oblong tiles of length 2, 3, or 4 (Project Euler problem 117)."""
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1 ):
        for tile_length in range(2 , 5 ):
            for tile_start in range(row_length - tile_length + 1 ):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]
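# Sanity check on small rows: the loop above reproduces the recurrence
# f(n) = f(n-1) + f(n-2) + f(n-3) + f(n-4) with f(0) = 1, giving
# 1, 2, 4, 8, 15 ways for rows of length 1 through 5.
assert solution(5) == 15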
if __name__ == "__main__":
print(f"{solution() = }")
| 82 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def A (self : Any ):
A = tempfile.mkdtemp()
# fmt: off
A = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
A = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
A = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
A = {"""unk_token""": """<unk>"""}
A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCamelCase_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCamelCase_ ) )
A = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"""image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
A = os.path.join(self.tmpdirname , UpperCamelCase_ )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
def A (self : List[Any] , **_lowerCAmelCase : List[str] ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def A (self : Tuple , **_lowerCAmelCase : Union[str, Any] ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def A (self : Optional[int] , **_lowerCAmelCase : int ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def A (self : Union[str, Any] ):
shutil.rmtree(self.tmpdirname )
def A (self : Optional[int] ):
A = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
A = [Image.fromarray(np.moveaxis(UpperCamelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A (self : Union[str, Any] ):
A = self.get_tokenizer()
A = self.get_rust_tokenizer()
A = self.get_image_processor()
A = CLIPProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
A = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase_ )
A = CLIPProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
A = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase_ )
self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCamelCase_ )
self.assertIsInstance(processor_fast.image_processor , UpperCamelCase_ )
def A (self : List[str] ):
A = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A = self.get_image_processor(do_normalize=UpperCamelCase_ , padding_value=1.0 )
A = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase_ )
def A (self : Union[str, Any] ):
A = self.get_image_processor()
A = self.get_tokenizer()
A = CLIPProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
A = self.prepare_image_inputs()
A = image_processor(UpperCamelCase_ , return_tensors="""np""" )
A = processor(images=UpperCamelCase_ , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def A (self : Optional[Any] ):
A = self.get_image_processor()
A = self.get_tokenizer()
A = CLIPProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
A = """lower newer"""
A = processor(text=UpperCamelCase_ )
A = tokenizer(UpperCamelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def A (self : Optional[Any] ):
A = self.get_image_processor()
A = self.get_tokenizer()
A = CLIPProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
A = """lower newer"""
A = self.prepare_image_inputs()
A = processor(text=UpperCamelCase_ , images=UpperCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_ ):
processor()
def A (self : Tuple ):
A = self.get_image_processor()
A = self.get_tokenizer()
A = CLIPProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A = processor.batch_decode(UpperCamelCase_ )
A = tokenizer.batch_decode(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
def A (self : int ):
A = self.get_image_processor()
A = self.get_tokenizer()
A = CLIPProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
A = """lower newer"""
A = self.prepare_image_inputs()
A = processor(text=UpperCamelCase_ , images=UpperCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 355 |
'''simple docstring'''
from __future__ import annotations
def encode( plain: str ) -> list[int]:
    """Convert lowercase letters to their A1Z26 numbers (a=1 ... z=26)."""
    return [ord(elem ) - 96 for elem in plain]


def decode( encoded: list[int] ) -> str:
    """Convert A1Z26 numbers (1-26) back to the corresponding letters."""
    return "".join(chr(elem + 96 ) for elem in encoded )


def main() -> None:
    encoded = encode(input("""-> """ ).strip().lower() )
    print("""Encoded: """ , encoded )
    print("""Decoded:""" , decode(encoded ) )


if __name__ == "__main__":
    main()
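# Round-trip example: encode("abc") == [1, 2, 3] and decode([1, 2, 3]) == "abc".
# The cipher assumes lowercase a-z input, which is why main() lowercases the
# prompt before encoding.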
| 337 | 0 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature( word: str ) -> str:
    """Return the alphabetically sorted letters of ``word``."""
    return "".join(sorted(word ) )


def anagram( my_word: str ) -> list[str]:
    """Return every word in the list sharing ``my_word``'s signature."""
    return word_by_signature[signature(my_word )]


data = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''')
word_list = sorted({word.strip().lower() for word in data.splitlines()})
word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word )].append(word )

if __name__ == "__main__":
    all_anagrams = {word: anagram(word ) for word in word_list if len(anagram(word )) > 1}
    with open('''anagrams.txt''', '''w''') as file:
        file.write('''all_anagrams = \n ''')
        file.write(pprint.pformat(all_anagrams ))
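# Example of the signature trick used above: "listen" and "silent" both sort
# to "eilnst", so they land in the same word_by_signature bucket and report
# each other as anagrams.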
| 196 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_timesformer"] = [
"TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimesformerModel",
"TimesformerForVideoClassification",
"TimesformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 35 | 0 |
from __future__ import annotations
graph = {
    """A""": ["""B""", """C""", """E"""],
    """B""": ["""A""", """D""", """E"""],
    """C""": ["""A""", """F""", """G"""],
    """D""": ["""B"""],
    """E""": ["""A""", """B""", """D"""],
    """F""": ["""C"""],
    """G""": ["""C"""],
}


class Graph:
    def __init__( self , graph , source_vertex ):
        """Initialise with an adjacency mapping and the BFS source vertex."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent = {}
        self.source_vertex = source_vertex

    def breath_first_search( self ):
        """Run a breadth-first search, recording each vertex's tree parent."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0 )
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex )
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex )

    def shortest_path( self , target_vertex ):
        """Return the source-to-target path in the BFS tree as ``A->B->C``."""
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex )
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg )
        return self.shortest_path(target_vertex_parent ) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, """G""")
    g.breath_first_search()
    print(g.shortest_path("""D"""))
    print(g.shortest_path("""G"""))
    print(g.shortest_path("""Foo"""))
| 122 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
__snake_case : Optional[int] = """\
@inproceedings{snover-etal-2006-study,
title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",
author = \"Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John\",
booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",
month = aug # \" 8-12\",
year = \"2006\",
address = \"Cambridge, Massachusetts, USA\",
publisher = \"Association for Machine Translation in the Americas\",
url = \"https://aclanthology.org/2006.amta-papers.25\",
pages = \"223--231\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
__snake_case : List[str] = """\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists needs to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
"""
__snake_case : Dict = """
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, removes punctuation from the sentences before scoring. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
'score' (float): TER score (num_edits / sum_ref_lengths * 100)
'num_edits' (int): The cumulative number of edits
'ref_length' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}
Example 2:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}
Example 3:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}
Example 4:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}
Example 5:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __SCREAMING_SNAKE_CASE ( datasets.Metric):
def UpperCamelCase__ ( self ):
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='http://www.cs.umd.edu/~snover/tercom/' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#ter'] , reference_urls=[
'https://github.com/jhclark/tercom',
] , )
    def _compute(self, predictions, references, normalized: bool = False, ignore_punct: bool = False, asian_support: bool = False, case_sensitive: bool = False):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(normalized=normalized, no_punct=ignore_punct, asian_support=asian_support, case_sensitive=case_sensitive)
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 122 | 1 |
'''simple docstring'''
def sum_of_divisors(input_num: int) -> int:
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
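# Quick check: the proper divisors of 12 are 1, 2, 3, 4 and 6, so
# sum_of_divisors(12) returns 16.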
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase_ = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""MBartTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""MBartTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
"""MBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MBartForCausalLM""",
"""MBartForConditionalGeneration""",
"""MBartForQuestionAnswering""",
"""MBartForSequenceClassification""",
"""MBartModel""",
"""MBartPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
"""TFMBartForConditionalGeneration""",
"""TFMBartModel""",
"""TFMBartPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
"""FlaxMBartForConditionalGeneration""",
"""FlaxMBartForQuestionAnswering""",
"""FlaxMBartForSequenceClassification""",
"""FlaxMBartModel""",
"""FlaxMBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 58 | 1 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)
def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name", type=str, default="wikitext", help="Name of the dataset. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path", type=str, default="sayakpaul/unigram-tokenizer-wikitext", help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size", type=int, default=1000, help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit", default=None, type=int, help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length", type=int, default=512, help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir", default="tf-tpu", type=str, help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn
def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
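    # For example, with max_length=4, the tokenized samples [1, 2, 3] and
    # [4, 5, 6, 7, 8] are concatenated to [1, ..., 8] and re-split into the
    # fixed-length blocks [1, 2, 3, 4] and [5, 6, 7, 8].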
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result
    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
lowerCAmelCase_ = parse_args()
main(args)
| 358 |
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
    def __call__(self, images: ImageInput = None, text=None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
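# A minimal usage sketch (assumes a checkpoint that ships a `qformer_tokenizer`
# subfolder, as InstructBLIP checkpoints do; identifiers are illustrative):
# processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
# inputs = processor(images=image, text="Describe the picture.", return_tensors="pt")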
| 279 | 0 |
'''simple docstring'''
def fizz_buzz(number: int, iterations: int) -> str:
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out
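# Example: fizz_buzz(1, 15) returns
# "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz "
# (note the trailing space after every entry).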
if __name__ == "__main__":
import doctest
doctest.testmod()
| 67 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class FlaxUNetaDConditionOutput(BaseOutput):
    sample: jnp.ndarray
@flax_register_to_config
class FlaxUNetaDConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
            )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)

        # time
        self.time_proj = FlaxTimesteps(block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift)
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlockaD(in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)
            else:
                down_block = FlaxDownBlockaD(in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype)

            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlockaDCrossAttn(in_channels=block_out_channels[-1], dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlockaD(in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, num_attention_heads=reversed_num_attention_heads[i], add_upsample=not is_final_block, dropout=self.dropout, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)
            else:
                up_block = FlaxUpBlockaD(in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, add_upsample=not is_final_block, dropout=self.dropout, dtype=self.dtype)

            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)
    def __call__(self, sample, timesteps, encoder_hidden_states, down_block_additional_residuals=None, mid_block_additional_residual=None, return_dict: bool = True, train: bool = False) -> Union[FlaxUNetaDConditionOutput, Tuple]:
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlockaD):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()
            for down_block_res_sample, down_block_additional_residual in zip(down_block_res_samples, down_block_additional_residuals):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)
            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlockaD):
                sample = up_block(sample, temb=t_emb, encoder_hidden_states=encoder_hidden_states, res_hidden_states_tuple=res_samples, deterministic=not train)
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNetaDConditionOutput(sample=sample)
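# A minimal initialization sketch (the defaults above are Stable-Diffusion sized,
# so this is memory hungry; shown for illustration only):
# model = FlaxUNetaDConditionModel(sample_size=32)
# params = model.init_weights(jax.random.PRNGKey(0))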
| 324 | 0 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
__UpperCAmelCase =" def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead", "BertLMPredictionHead", REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead", "BertLMPredictionHead", REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel", "TestModelLMPredictionHead", re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}", f"{long_class_name}LMPredictionHead", re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel", "TestModelLMPredictionHead", REFERENCE_CODE, overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
        md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
''' Luong, Quoc V. Le, Christopher D. Manning.'''
)
        localized_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_md_list_sample = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
''' Christopher D. Manning 发布。\n'''
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
)
        link_unchanged_md_list = (
'''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        converted_md_list_sample = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
| 237 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class PoolFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, crop_pct: int = 0.9, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, rescale_factor: Union[int, float] = 1 / 255, do_rescale: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], crop_pct: Optional[float] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        if crop_pct is not None:
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
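    # For example, with size={"shortest_edge": 224} and crop_pct=0.9, the image is
    # first resized so its shortest edge becomes int(224 / 0.9) = 248; the separate
    # center-crop step below then cuts out the final 224x224 region.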
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, crop_pct: int = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_pct is None:
            raise ValueError("Crop_pct must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 237 | 1 |
'''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class SwinaSRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_pad: bool = True, pad_size: int = 8, **kwargs) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size
    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)
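    # For example, a 17x20 image with size=8 is padded (bottom/right, symmetric
    # reflection) to 24x24; note that a dimension already at a multiple of `size`
    # still gains a full extra block (16 -> 24), as the formula above always adds
    # at least one row/column.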
    def preprocess(self, images: ImageInput, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_pad: Optional[bool] = None, pad_size: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 34 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
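# Example invocation (hypothetical paths; `swag` is one of the registered tasks):
# python run_multiple_choice.py --task_name swag --model_name_or_path bert-base-uncased \
#     --data_dir ./swag --output_dir ./output --max_seq_length 80 --do_train --do_eval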
| 34 | 1 |
def factorial(digit: int) -> int:
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))


def krishnamurthy(number: int) -> bool:
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number
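# Example: 145 is a Krishnamurthy (strong) number because
# 1! + 4! + 5! = 1 + 24 + 120 = 145.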
if __name__ == "__main__":
print('''Program to check whether a number is a Krisnamurthy Number or not.''')
    number = int(input("Enter number: ").strip())
print(
f'{number} is {"" if krishnamurthy(number) else "not "}a Krishnamurthy Number.'
)
| 359 |
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def __magic_name__ ( __a : Dict , __a : Tuple , __a : int , __a : Dict="attention" ):
'''simple docstring'''
UpperCamelCase__ = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
UpperCamelCase__ = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
UpperCamelCase__ = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
UpperCamelCase__ = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
return k, o, q, v
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]

    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool):
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def __magic_name__ ( __a : Optional[Any] , __a : Optional[Any] , __a : Any , __a : bool = False ):
'''simple docstring'''
UpperCamelCase__ = TaConfig.from_json_file(__a )
print(f"Building PyTorch model from configuration: {config}" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
UpperCamelCase__ = TaEncoderModel(__a )
else:
UpperCamelCase__ = TaForConditionalGeneration(__a )
# Load weights from tf checkpoint
load_tax_weights_in_ta(__a , __a , __a , __a )
# Save pytorch-model
print(f"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(__a )
# Verify that we can load the checkpoint.
model.from_pretrained(__a )
print("""Done""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
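# A hedged usage sketch (assuming this file is saved as
# convert_t5x_checkpoint_to_pytorch.py; all paths below are placeholders):
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_dump
#
# The resulting dump can then be reloaded with TaForConditionalGeneration.from_pretrained.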
| 178 | 0 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, input_dims: int = 128, targets_length: int = 256, max_decoder_noise_time: float = 2000.0, d_model: int = 768, num_layers: int = 12, num_heads: int = 12, d_kv: int = 64, d_ff: int = 2048, dropout_rate: float = 0.1):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False), nn.SiLU(), nn.Linear(d_model * 4, d_model * 4, bias=False), nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = TaLayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time, embedding_dim=self.config.d_model, max_period=self.config.max_decoder_noise_time, ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device), (batch, seq_length), )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y, conditioning_emb=conditioning_emb, encoder_hidden_states=encoded, encoder_attention_mask=encoder_decoder_mask, )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)
        return spec_out
class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate) )

        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon, ) )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon) )

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None):
        hidden_states = self.layer[0](
            hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask, )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype )

            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask, )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)
class TaLayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model)
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states
class TaLayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states, encoder_hidden_states=key_value_states, attention_mask=attention_mask.squeeze(1), )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class TaLayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class TaDenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class TaLayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """T5-style layer norm: scale only, no mean subtraction and no bias (i.e. RMSNorm)."""
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    def forward(self, input):
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
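# The expression above is the tanh approximation of GELU,
# 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x**3))),
# i.e. the "gelu_new" activation paired with the gated dense layer above.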
class TaFiLMLayer(nn.Module):
    """FiLM layer: predicts a per-feature scale and shift from a conditioning embedding."""

    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
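# A minimal FiLM sanity check (a standalone sketch; shapes chosen to match the
# defaults above): FiLM modulates features as x * (1 + scale) + shift, with
# scale and shift predicted from the conditioning embedding.
#
#   emb = torch.randn(1, 4 * 768)                 # (batch, d_model * 4)
#   film = TaFiLMLayer(in_features=4 * 768, out_features=768)
#   x = torch.randn(1, 100, 768)                  # (batch, seq, d_model)
#   out = film(x, emb.unsqueeze(1))               # scale/shift broadcast over seq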
| 219 |
import math
def fx(x: float, a: float) -> float:
    '''simple docstring'''
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    '''simple docstring'''
    return 2 * x


def get_initial_point(a: float) -> float:
    '''simple docstring'''
    start = 2.0

    while start <= a:
        start = math.pow(start, 2)

    return start


def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 0.00_000_000_000_001) -> float:
    '''simple docstring'''
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
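# Worked example: for a = 2 the initial point is 4.0 (2.0 squared repeatedly
# until it exceeds a), and each Newton step x - (x**2 - a) / (2 * x) roughly
# doubles the number of correct digits: 4.0 -> 2.25 -> 1.5694 -> 1.4219 -> ...
# print(square_root_iterative(2))  # 1.4142135623730951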
| 281 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(self, vocab_size=21128, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, max_relative_position=64, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, classifier_dropout=0.1, pad_token_id=0, bos_token_id=2, eos_token_id=3, use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
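# A hedged usage sketch: the defaults above mirror "sijunhe/nezha-cn-base", and
# any field can be overridden as with other PretrainedConfig subclasses.
# config = NezhaConfig(hidden_dropout_prob=0.2)
# print(config.max_relative_position)  # 64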
| 365 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
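# With the sys.modules swap above, importing this package yields the _LazyModule;
# heavyweight torch-backed classes are only materialized on first attribute
# access. A hedged illustration:
#
#   from transformers.models.instructblip import InstructBlipProcessor  # triggers the real import lazily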
| 251 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
lowercase__ : Optional[Any] = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class FalconConfig(PretrainedConfig):
    model_type = 'falcon'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(self, vocab_size=65024, hidden_size=4544, num_hidden_layers=32, num_attention_heads=71, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, hidden_dropout=0.0, attention_dropout=0.0, num_kv_heads=None, alibi=False, new_decoder_architecture=False, multi_query=True, parallel_attn=True, bias=False, bos_token_id=11, eos_token_id=11, **kwargs):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop('n_embed', None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
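# Quick check of the derived properties, using the Falcon-7B-style defaults above:
# config = FalconConfig()   # hidden_size=4544, num_attention_heads=71
# config.head_dim           # 4544 // 71 == 64
# config.rotary             # True, because alibi defaults to False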
| 324 |
import torch
from diffusers import StableDiffusionPipeline
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
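# A hedged extension: seeding a generator makes the sample reproducible (the
# `generator` argument is part of the diffusers pipeline API; the seed is arbitrary).
# generator = torch.Generator("cuda").manual_seed(0)
# image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5, generator=generator).images[0]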
| 244 | 0 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape, with values in [0, vocab_size)."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class lowerCamelCase :
"""simple docstring"""
UpperCAmelCase_ = None
UpperCAmelCase_ = ()
def A_ ( self : str ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
SCREAMING_SNAKE_CASE__ : List[Any] = 2
SCREAMING_SNAKE_CASE__ : Union[str, Any] = inputs["input_ids"].shape[-1] // 2
SCREAMING_SNAKE_CASE__ : Union[str, Any] = inputs["input_ids"][:max_batch_size, :sequence_length]
SCREAMING_SNAKE_CASE__ : Optional[int] = jnp.ones_like(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
SCREAMING_SNAKE_CASE__ : Union[str, Any] = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
SCREAMING_SNAKE_CASE__ : str = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def A_ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Optional[Any] = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = False
SCREAMING_SNAKE_CASE__ : Any = max_length
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_class(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE__ : Union[str, Any] = getattr(_UpperCAmelCase, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = pt_model_class(_UpperCAmelCase ).eval()
SCREAMING_SNAKE_CASE__ : Tuple = load_flax_weights_in_pytorch_model(_UpperCAmelCase, flax_model.params )
SCREAMING_SNAKE_CASE__ : Optional[Any] = flax_model.generate(_UpperCAmelCase ).sequences
SCREAMING_SNAKE_CASE__ : List[str] = pt_model.generate(torch.tensor(_UpperCAmelCase, dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
SCREAMING_SNAKE_CASE__ : Dict = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist() )
def A_ ( self : Tuple ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Optional[Any] = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE__ : Tuple = False
SCREAMING_SNAKE_CASE__ : Dict = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_class(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : str = model.generate(_UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1], _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = jit(model.generate )
SCREAMING_SNAKE_CASE__ : Tuple = jit_generate(_UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
def A_ ( self : List[str] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : str = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ : List[str] = model_class(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = model.generate(_UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1], _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] = jit(model.generate )
SCREAMING_SNAKE_CASE__ : Dict = jit_generate(_UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
def A_ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_length
SCREAMING_SNAKE_CASE__ : Optional[Any] = 2
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = model.generate(_UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1], _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = jit(model.generate )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jit_generate(_UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
def A_ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Optional[Any] = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : List[str] = max_length
SCREAMING_SNAKE_CASE__ : List[Any] = 2
SCREAMING_SNAKE_CASE__ : Any = 2
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_class(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model.generate(_UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences )
def A_ ( self : int ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : str = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE__ : int = True
SCREAMING_SNAKE_CASE__ : Optional[Any] = max_length
SCREAMING_SNAKE_CASE__ : List[Any] = 0.8
SCREAMING_SNAKE_CASE__ : Tuple = 1_0
SCREAMING_SNAKE_CASE__ : Dict = 0.3
SCREAMING_SNAKE_CASE__ : Any = 1
SCREAMING_SNAKE_CASE__ : int = 8
SCREAMING_SNAKE_CASE__ : Dict = 9
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ : Dict = model_class(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : int = model.generate(_UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1], _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jit(model.generate )
SCREAMING_SNAKE_CASE__ : Optional[int] = jit_generate(_UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
def A_ ( self : str ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Optional[int] = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE__ : str = max_length
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1
SCREAMING_SNAKE_CASE__ : Dict = 8
SCREAMING_SNAKE_CASE__ : str = 9
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = model.generate(_UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1], _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jit(model.generate )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jit_generate(_UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
def A_ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Optional[Any] = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE__ : Tuple = max_length
SCREAMING_SNAKE_CASE__ : List[str] = 2
SCREAMING_SNAKE_CASE__ : Optional[int] = 1
SCREAMING_SNAKE_CASE__ : List[str] = 8
SCREAMING_SNAKE_CASE__ : str = 9
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ : str = model_class(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = model.generate(_UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1], _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = jit(model.generate )
SCREAMING_SNAKE_CASE__ : Tuple = jit_generate(_UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
def A_ ( self : Tuple ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : int = self._get_input_ids_and_config()
# pad attention mask on the left
SCREAMING_SNAKE_CASE__ : Tuple = attention_mask.at[(0, 0)].set(0 )
SCREAMING_SNAKE_CASE__ : Tuple = False
SCREAMING_SNAKE_CASE__ : Any = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ : int = model_class(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : int = model.generate(_UpperCAmelCase, attention_mask=_UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1], _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : str = jit(model.generate )
SCREAMING_SNAKE_CASE__ : int = jit_generate(_UpperCAmelCase, attention_mask=_UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
def A_ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[str] = self._get_input_ids_and_config()
# pad attention mask on the left
SCREAMING_SNAKE_CASE__ : Dict = attention_mask.at[(0, 0)].set(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = True
SCREAMING_SNAKE_CASE__ : List[Any] = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ : List[str] = model_class(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = model.generate(_UpperCAmelCase, attention_mask=_UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1], _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = jit(model.generate )
SCREAMING_SNAKE_CASE__ : Optional[int] = jit_generate(_UpperCAmelCase, attention_mask=_UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
def A_ ( self : Any ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[Any] = self._get_input_ids_and_config()
# pad attention mask on the left
SCREAMING_SNAKE_CASE__ : Tuple = attention_mask.at[(0, 0)].set(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = 2
SCREAMING_SNAKE_CASE__ : Optional[int] = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = model.generate(_UpperCAmelCase, attention_mask=_UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1], _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : str = jit(model.generate )
SCREAMING_SNAKE_CASE__ : Any = jit_generate(_UpperCAmelCase, attention_mask=_UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist() )
@require_flax
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
def A_ ( self : Dict ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" )
SCREAMING_SNAKE_CASE__ : Optional[int] = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "Hello world"
SCREAMING_SNAKE_CASE__ : Dict = tokenizer(_UpperCAmelCase, return_tensors="np" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(_UpperCAmelCase, "do_samples" ):
model.generate(_UpperCAmelCase, do_samples=_UpperCAmelCase )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(_UpperCAmelCase, "foo" ):
SCREAMING_SNAKE_CASE__ : int = {"foo": "bar"}
model.generate(_UpperCAmelCase, **_UpperCAmelCase )
| 191 |
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    '''simple docstring'''
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
| 191 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_longformer''': [
'''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LongformerConfig''',
'''LongformerOnnxConfig''',
],
'''tokenization_longformer''': ['''LongformerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longformer"] = [
'''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongformerForMaskedLM''',
'''LongformerForMultipleChoice''',
'''LongformerForQuestionAnswering''',
'''LongformerForSequenceClassification''',
'''LongformerForTokenClassification''',
'''LongformerModel''',
'''LongformerPreTrainedModel''',
'''LongformerSelfAttention''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_longformer"] = [
'''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLongformerForMaskedLM''',
'''TFLongformerForMultipleChoice''',
'''TFLongformerForQuestionAnswering''',
'''TFLongformerForSequenceClassification''',
'''TFLongformerForTokenClassification''',
'''TFLongformerModel''',
'''TFLongformerPreTrainedModel''',
'''TFLongformerSelfAttention''',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 157 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path" )
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
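# Hedged sketch of the expected ref-file layout: one JSON-encoded list per line,
# one line per training example, listing the character positions that continue a
# whole word. The resulting "chinese_ref" column is the signal that
# DataCollatorForWholeWordMask uses to mask whole words together, e.g.:
#
#   [2, 3]
#   []
#   [1, 4, 5]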
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__UpperCAmelCase : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
__UpperCAmelCase : int = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__UpperCAmelCase : List[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout )], )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s", snake_case__ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__UpperCAmelCase : Optional[Any] = load_dataset(data_args.dataset_name, data_args.dataset_config_name )
if "validation" not in datasets.keys():
__UpperCAmelCase : Dict = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, split=f'''train[:{data_args.validation_split_percentage}%]''', )
__UpperCAmelCase : List[str] = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, split=f'''train[{data_args.validation_split_percentage}%:]''', )
else:
__UpperCAmelCase : List[Any] = {}
if data_args.train_file is not None:
__UpperCAmelCase : Optional[int] = data_args.train_file
if data_args.validation_file is not None:
__UpperCAmelCase : List[str] = data_args.validation_file
__UpperCAmelCase : Tuple = data_args.train_file.split("." )[-1]
if extension == "txt":
__UpperCAmelCase : str = "text"
__UpperCAmelCase : List[Any] = load_dataset(snake_case__, data_files=snake_case__ )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCAmelCase : Tuple = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
__UpperCAmelCase : Any = AutoConfig.from_pretrained(model_args.config_name, **snake_case__ )
elif model_args.model_name_or_path:
__UpperCAmelCase : int = AutoConfig.from_pretrained(model_args.model_name_or_path, **snake_case__ )
else:
__UpperCAmelCase : str = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.config_overrides is not None:
logger.info(f'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(f'''New config: {config}''' )
__UpperCAmelCase : List[Any] = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
__UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **snake_case__ )
elif model_args.model_name_or_path:
__UpperCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **snake_case__ )
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name." )
if model_args.model_name_or_path:
__UpperCAmelCase : int = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path ), config=snake_case__, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
else:
logger.info("Training new model from scratch" )
__UpperCAmelCase : Any = AutoModelForMaskedLM.from_config(snake_case__ )
model.resize_token_embeddings(len(snake_case__ ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
__UpperCAmelCase : List[str] = datasets["train"].column_names
else:
__UpperCAmelCase : Union[str, Any] = datasets["validation"].column_names
__UpperCAmelCase : Union[str, Any] = "text" if "text" in column_names else column_names[0]
__UpperCAmelCase : Any = "max_length" if data_args.pad_to_max_length else False
def tokenize_function(snake_case__ ):
# Remove empty lines
__UpperCAmelCase : Any = [line for line in examples["text"] if len(snake_case__ ) > 0 and not line.isspace()]
return tokenizer(examples["text"], padding=snake_case__, truncation=snake_case__, max_length=data_args.max_seq_length )
__UpperCAmelCase : List[str] = datasets.map(
snake_case__, batched=snake_case__, num_proc=data_args.preprocessing_num_workers, remove_columns=[text_column_name], load_from_cache_file=not data_args.overwrite_cache, )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
__UpperCAmelCase : str = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file )
if data_args.validation_ref_file is not None:
__UpperCAmelCase : List[str] = add_chinese_references(
tokenized_datasets["validation"], data_args.validation_ref_file )
# If we have ref files, need to avoid it removed by trainer
__UpperCAmelCase : List[str] = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
__UpperCAmelCase : Tuple = False
# Data collator
# This one will take care of randomly masking the tokens.
__UpperCAmelCase : Optional[Any] = DataCollatorForWholeWordMask(tokenizer=snake_case__, mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__UpperCAmelCase : str = Trainer(
model=snake_case__, args=snake_case__, train_dataset=tokenized_datasets["train"] if training_args.do_train else None, eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None, tokenizer=snake_case__, data_collator=snake_case__, )
# Training
if training_args.do_train:
if last_checkpoint is not None:
__UpperCAmelCase : int = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
__UpperCAmelCase : Any = model_args.model_name_or_path
else:
__UpperCAmelCase : Tuple = None
__UpperCAmelCase : str = trainer.train(resume_from_checkpoint=snake_case__ )
trainer.save_model() # Saves the tokenizer too for easy upload
__UpperCAmelCase : str = os.path.join(training_args.output_dir, "train_results.txt" )
if trainer.is_world_process_zero():
with open(snake_case__, "w" ) as writer:
logger.info("***** Train results *****" )
for key, value in sorted(train_result.metrics.items() ):
logger.info(f''' {key} = {value}''' )
writer.write(f'''{key} = {value}\n''' )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json" ) )
# Evaluation
__UpperCAmelCase : Dict = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
__UpperCAmelCase : List[Any] = trainer.evaluate()
__UpperCAmelCase : int = math.exp(eval_output["eval_loss"] )
__UpperCAmelCase : Union[str, Any] = perplexity
__UpperCAmelCase : List[Any] = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt" )
if trainer.is_world_process_zero():
with open(snake_case__, "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in sorted(results.items() ):
logger.info(f''' {key} = {value}''' )
writer.write(f'''{key} = {value}\n''' )
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
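# A hedged launch sketch (assuming this script is saved as run_mlm_wwm.py; the
# model name and file paths below are placeholders):
#
#   python run_mlm_wwm.py \
#       --model_name_or_path bert-base-chinese \
#       --train_file train.txt \
#       --train_ref_file train_ref.txt \
#       --validation_file dev.txt \
#       --validation_ref_file dev_ref.txt \
#       --do_train --do_eval \
#       --output_dir ./mlm_wwm_out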
| 157 | 1 |
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = 'src/transformers'

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_re_flax_models = re.compile(r'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')

# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
    ('image-to-text', 'MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def camel_case_split(identifier):
    '''simple docstring'''
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
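# For example, camel_case_split("TFBertForMaskedLM") yields
# ["TF", "Bert", "For", "Masked", "LM"]; the framework scan below strips words
# from the right until it finds a known model prefix such as "Bert".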
def get_frameworks_table():
    '''simple docstring'''
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))

            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})

    return table
def update_metadata(token, commit_sha):
    """Regenerates the frameworks and pipeline-tags tables and pushes them to the metadata dataset."""
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"

        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )
def check_pipeline_tags():
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
    parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
    parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
    args = parser.parse_args()

    if args.check_only:
        check_pipeline_tags()
    else:
        update_metadata(args.token, args.commit_sha)
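# Example invocations (a sketch; run from the root of the transformers repo):
#
#     python utils/update_metadata.py --token <hf_token> --commit_sha <sha>  # push updated metadata
#     python utils/update_metadata.py --check-only                           # only validate pipeline tags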
| 358 |
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


# for checking whether the graph has an Euler path or circuit
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
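# Expected output for the first sample graph (derived by hand): vertices 1 and 5
# of g1 have odd degree, so it has an Euler path; the DFS starts from the last
# odd vertex found (5) and prints:
#
#     graph has a Euler path
#     [5, 4, 1, 2, 3, 1]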
| 176 | 0 |
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
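# Quick sanity check (sketch): an odd cycle cannot be two-colored, so a triangle
# is rejected, while the 4-cycle above (plus an isolated vertex) is accepted:
#
#     >>> check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]})
#     False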
| 29 |
import os

# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    """Counts the words in words.txt whose alphabetical value is a triangular number."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)


if __name__ == "__main__":
    print(solution())
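# Worked example (by hand): "SKY" has word value 19 + 11 + 25 = 55, and
# 55 = 10 * 11 / 2 is the 10th triangular number, so "SKY" would be counted.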
| 29 | 1 |
def solution(limit: int = 50_000_000) -> int:
    """
    Counts the numbers below `limit` expressible as the sum of a prime square,
    a prime cube, and a prime fourth power.
    """
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)


if __name__ == "__main__":
    print(f"{solution() = }")
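# Worked example (by hand): 28 = 2**2 + 2**3 + 2**4 is the smallest such prime
# power triple, and below fifty there are exactly four (28, 33, 47 and 49), so:
#
#     >>> solution(50)
#     4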
| 369 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    """A modified DDPM scheduler used in the UnCLIP (Karlo) pipelines."""

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)

    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ):
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
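# Usage sketch (assumptions: `unet` is a hypothetical denoising model with the
# usual (sample, timestep) call signature; shapes are illustrative only):
#
#     scheduler = UnCLIPScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(25)
#     sample = torch.randn(1, 3, 64, 64)
#     for t in scheduler.timesteps:
#         model_output = unet(sample, t)
#         sample = scheduler.step(model_output, t, sample).prev_sample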
| 329 | 0 |
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # can't train a new tokenizer via the tokenizers lib
    def test_training_new_tokenizer(self):
        pass

    # can't train a new tokenizer via the tokenizers lib
    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
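# Minimal usage sketch outside the test harness (assumes Hub access and the
# `rjieba` package required by the tests above):
#
#     from transformers import RoFormerTokenizerFast
#     tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#     tokenizer.tokenize("今天天气非常好")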
| 176 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)
__snake_case = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/bart-base""": 10_24,
"""facebook/bart-large""": 10_24,
"""facebook/bart-large-mnli""": 10_24,
"""facebook/bart-large-cnn""": 10_24,
"""facebook/bart-large-xsum""": 10_24,
"""yjernite/bart_eli5""": 10_24,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
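# Minimal usage sketch (assumes Hub access):
#
#     from transformers import BartTokenizerFast
#     tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
#     encoding = tokenizer("Hello world", return_tensors="pt")
#     encoding["input_ids"]  # wrapped in <s> ... </s> by build_inputs_with_special_tokens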
| 176 | 1 |
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument(
        '--txt2img_unclip',
        default='kakaobrain/karlo-v1-alpha',
        type=str,
        required=False,
        help='The pretrained txt2img unclip.',
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
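# Example invocation (a sketch; the script file name is an assumption):
#
#     python convert_unclip_txt2img_to_image_variation.py --dump_path ./karlo-image-variations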
| 359 |
def is_sum_subset(arr: list, required_sum: int) -> bool:
    """
    >>> is_sum_subset([2, 4, 6, 8], 5)
    False
    >>> is_sum_subset([2, 4, 6, 8], 14)
    True
    """
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
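# Illustration of the recurrence (a sketch): subset[i][j] is True when some
# subset of the first i elements sums to j; element arr[i - 1] is either
# skipped (subset[i - 1][j]) or taken (subset[i - 1][j - arr[i - 1]]).
# For arr = [2, 4, 6, 8] every element is even, so an odd required_sum such
# as 5 is never reachable, while 14 = 2 + 4 + 8 is.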
| 116 | 0 |
import os
import random
import sys

from . import cryptomath_module as cryptoMath  # noqa: N812
from . import rabin_miller as rabinMiller  # noqa: N812


def main() -> None:
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")


def generate_key(key_size: int) -> tuple:
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")


if __name__ == "__main__":
    main()
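# Sanity-check sketch: for any message m < n, RSA round-trips with the keys
# produced above, i.e. pow(pow(m, e, n), d, n) == m, where (n, e) is the
# public key and (n, d) the private key.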
| 215 |
import inspect
import os
import re

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
"""EncodecConfig""": ["""overlap"""],
# used as `self.bert_model = BertModel(config, ...)`
"""DPRConfig""": True,
# not used in modeling files, but it's an important information
"""FSMTConfig""": ["""langs"""],
# used internally in the configuration class file
"""GPTNeoConfig""": ["""attention_types"""],
# used internally in the configuration class file
"""EsmConfig""": ["""is_folding_model"""],
# used during training (despite we don't have training script for these models yet)
"""Mask2FormerConfig""": ["""ignore_value"""],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"""OneFormerConfig""": ["""ignore_value""", """norm"""],
# used during preprocessing and collation, see `collating_graphormer.py`
"""GraphormerConfig""": ["""spatial_pos_max"""],
# used internally in the configuration class file
"""T5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"""MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
"""UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
# used internally in the configuration class file
"""LongT5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
"""SwitchTransformersConfig""": ["""feed_forward_proj"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""BioGptConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""GLPNConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""SegformerConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""CvtConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""PerceiverConfig""": ["""layer_norm_eps"""],
# used internally to calculate the feature size
"""InformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate `mlp_dim`
"""SamVisionConfig""": ["""mlp_ratio"""],
# For (head) training, but so far not implemented
"""ClapAudioConfig""": ["""num_classes"""],
# Not used, but providing useful information to users
"""SpeechT5HifiGanConfig""": ["""sampling_rate"""],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"""CLIPSegConfig""": True,
"""DeformableDetrConfig""": True,
"""DetaConfig""": True,
"""DinatConfig""": True,
"""DonutSwinConfig""": True,
"""EfficientFormerConfig""": True,
"""FSMTConfig""": True,
"""JukeboxConfig""": True,
"""LayoutLMv2Config""": True,
"""MaskFormerSwinConfig""": True,
"""MT5Config""": True,
"""NatConfig""": True,
"""OneFormerConfig""": True,
"""PerceiverConfig""": True,
"""RagConfig""": True,
"""SpeechT5Config""": True,
"""SwinConfig""": True,
"""Swin2SRConfig""": True,
"""Swinv2Config""": True,
"""SwitchTransformersConfig""": True,
"""TableTransformerConfig""": True,
"""TapasConfig""": True,
"""TransfoXLConfig""": True,
"""UniSpeechConfig""": True,
"""UniSpeechSatConfig""": True,
"""WavLMConfig""": True,
"""WhisperConfig""": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"""JukeboxPriorConfig""": True,
# TODO: @Younes (for `is_decoder`)
"""Pix2StructTextConfig""": True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True

            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"

        raise ValueError(error)
if __name__ == "__main__":
check_config_attributes()
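# Run note (sketch): invoke from the root of the transformers repo, e.g.
#
#     python utils/check_config_attributes.py
#
# A ValueError is raised listing any configuration attributes that no
# modeling file ever reads.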
| 215 | 1 |
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serializes this instance to a Python dictionary, including the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
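# Usage sketch:
#
#     config = DetaConfig(num_queries=300, two_stage=True, with_box_refine=True)
#     config.num_attention_heads  # 8, aliased to encoder_attention_heads via attribute_map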
| 183 |
import re
import string

import numpy as np
import datasets

_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all numbers before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
_CITATION = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
| 183 | 1 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
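# Run note (sketch; the file name is an assumption): these tests target pytest
# and expect to run from the repository root so the relative image paths resolve:
#
#     pytest digital_image_processing/test_digital_image_processing.py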
| 298 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)
    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)
    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")
def __lowerCAmelCase ( snake_case__ , snake_case__ = None , snake_case__ = True ):
__UpperCamelCase : str = "imagenet-1k-id2label.json"
__UpperCamelCase : Any = 1_000
__UpperCamelCase : List[str] = (1, num_labels)
__UpperCamelCase : List[str] = "huggingface/label-files"
__UpperCamelCase : str = num_labels
__UpperCamelCase : str = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="dataset" ) , "r" ) )
__UpperCamelCase : List[str] = {int(snake_case__ ): v for k, v in idalabel.items()}
__UpperCamelCase : Any = idalabel
__UpperCamelCase : Optional[int] = {v: k for k, v in idalabel.items()}
__UpperCamelCase : Tuple = partial(snake_case__ , num_labels=snake_case__ , idalabel=snake_case__ , labelaid=snake_case__ )
__UpperCamelCase : Dict = {
"resnet18": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ),
"resnet26": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
"resnet34": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ),
"resnet50": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
"resnet101": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
"resnet152": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ),
}
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
            '''The name of the model you wish to convert; it must be one of the supported resnet* architectures,'''
            ''' currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 298 | 1 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 110 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1_000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
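        # Worked example with the defaults above (added for clarity; not in the original test):
        # image_size=4, patch_size=2 -> (4 // 2) ** 2 + 1 = 5 visual tokens,
        # text_seq_length=7          -> seq_length = 7 + 5 = 12.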
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
        # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
        # the sequence dimension of the text embedding only.
        # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
        return True
    def setUp(self):
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device,
                )
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 110 | 1 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"
class AlbertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs
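    # Illustrative behaviour (added; not in the original source): with the defaults
    # remove_space=True, keep_accents=False, do_lower_case=True,
    # preprocess_text("  Héllo ``World''  ") returns 'hello "world"'
    # (whitespace collapsed, LaTeX-style quotes normalized, accents stripped, lowercased).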
    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 125 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class FlaxUNetaDConditionOutput(BaseOutput):
    sample: jnp.ndarray


@flax_register_to_config
class FlaxUNetaDConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
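    # Minimal usage sketch (added; not in the original file). Flax modules are
    # stateless, so parameters are materialized from input shapes alone, e.g.:
    #   model = FlaxUNetaDConditionModel()
    #   params = model.init_weights(jax.random.PRNGKey(0))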
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
            )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)

        # time
        self.time_proj = FlaxTimesteps(block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift)
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlockaD(in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)
            else:
                down_block = FlaxDownBlockaD(in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype)

            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlockaDCrossAttn(in_channels=block_out_channels[-1], dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlockaD(in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, num_attention_heads=reversed_num_attention_heads[i], add_upsample=not is_final_block, dropout=self.dropout, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)
            else:
                up_block = FlaxUpBlockaD(in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, add_upsample=not is_final_block, dropout=self.dropout, dtype=self.dtype)

            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        down_block_additional_residuals=None,
        mid_block_additional_residual=None,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlockaD):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()

            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)

            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlockaD):
                sample = up_block(
                    sample,
                    temb=t_emb,
                    encoder_hidden_states=encoder_hidden_states,
                    res_hidden_states_tuple=res_samples,
                    deterministic=not train,
                )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNetaDConditionOutput(sample=sample)
| 117 | 0 |
def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")

    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
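# Illustrative checks (added for clarity; not in the original file):
# is_isogram("Uncopyrightable") -> True (all 15 letters are distinct)
# is_isogram("allowed")         -> False (the letter "l" repeats)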
if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
print(F'''{input_str} is {"an" if isogram else "not an"} isogram.''')
| 288 |
def z_function(input_str: str) -> list[int]:
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result
def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]
def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index is the starting position of a substring
        # which is equal to the pattern string
        if val >= len(pattern):
            answer += 1

    return answer
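# Worked example (added for clarity; not in the original file):
# z_function("abracadabra") -> [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]
# (e.g. index 7 starts "abra", which matches a prefix of length 4)
# find_pattern("abr", "abracadabra") -> 2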
if __name__ == "__main__":
import doctest
doctest.testmod()
| 288 | 1 |
'''simple docstring'''
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester(object):
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"

        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)
    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")
    def run_common_tests(self):
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
| 349 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
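# Illustrative behaviour (added; not in the original script): for a label file
# whose lines are "speech ..." and "noise ...", the function returns
# {0: "speech", 1: "noise"}, i.e. line number -> first whitespace-separated word.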
def set_recursively(key, value, full_name, weight_type, hf_pointer):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def load_wavaveca_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(mapped_key, value, name, weight_type, hf_model)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wavaveca_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path)
    else:
        config = WavaVecaConfig()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = WavaVecaForSequenceClassification(config)
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = WavaVecaForCTC(config)
    else:
        hf_wav2vec = WavaVecaForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 349 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 174 |
'''simple docstring'''
def multiplicative_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)

        steps += 1
    return steps
def additive_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)

        steps += 1
    return steps
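# Worked examples (added for clarity; not in the original file):
# multiplicative_persistence(217) -> 2   (217 -> 2*1*7 = 14 -> 1*4 = 4)
# additive_persistence(199)       -> 3   (199 -> 1+9+9 = 19 -> 10 -> 1)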
if __name__ == "__main__":
import doctest
doctest.testmod()
| 174 | 1 |
"""simple docstring"""
__all__ = [
'DownloadConfig',
'DownloadManager',
'DownloadMode',
'StreamingDownloadManager',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 167 |
"""simple docstring"""
import os
def solution(filename: str = "input.txt") -> int:
    """Return the minimal path sum in the matrix read from ``filename``,
    moving only up, down, and right (Project Euler problem 82)."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")] for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
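
# Worked example (hand-checked, assumed 2x2 input): for [[1, 9], [1, 1]] the
# minimal left-to-right path is 1 -> 1 along the bottom row, so the function
# would return 2 for such an input file.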
if __name__ == "__main__":
print(f'{solution() = }')
| 167 | 1 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None


def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh


def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
main()
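
# Typical invocation (the script and file names below are placeholders):
#   python evaluate_squad_v2.py dev-v2.0.json predictions.json \
#       --na-prob-file na_probs.json --out-file eval.json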
| 206 |
import tempfile

import torch

from diffusers import (
    DEISMultistepScheduler,
    DPMSolverMultistepScheduler,
    DPMSolverSinglestepScheduler,
    UniPCMultistepScheduler,
)

from .test_schedulers import SchedulerCommonTest


class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(output, t, sample, **kwargs).prev_sample
                new_output = new_scheduler.step(new_output, t, sample, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    sample = self.full_loop(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1014) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16

    def test_unique_timesteps(self, **config):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
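
# To run only this suite from a diffusers checkout (the path is assumed from
# the usual repository layout):
#   python -m pytest tests/schedulers/test_scheduler_unipc.py -q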
| 206 | 1 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
lowerCAmelCase__ : Optional[int] = logging.get_logger(__name__)
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = "upernet"
def __init__( self : int ,lowerCamelCase__ : Any=None ,lowerCamelCase__ : List[Any]=512 ,lowerCamelCase__ : str=0.0_2 ,lowerCamelCase__ : List[Any]=[1, 2, 3, 6] ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : Optional[Any]=0.4 ,lowerCamelCase__ : int=384 ,lowerCamelCase__ : int=256 ,lowerCamelCase__ : int=1 ,lowerCamelCase__ : Optional[int]=False ,lowerCamelCase__ : Union[str, Any]=255 ,**lowerCamelCase__ : str ,):
super().__init__(**lowerCamelCase__ )
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
UpperCAmelCase__ = CONFIG_MAPPING['resnet'](out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
elif isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
UpperCAmelCase__ = backbone_config.get('model_type' )
UpperCAmelCase__ = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase__ = config_class.from_dict(lowerCamelCase__ )
UpperCAmelCase__ = backbone_config
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = pool_scales
UpperCAmelCase__ = use_auxiliary_head
UpperCAmelCase__ = auxiliary_loss_weight
UpperCAmelCase__ = auxiliary_in_channels
UpperCAmelCase__ = auxiliary_channels
UpperCAmelCase__ = auxiliary_num_convs
UpperCAmelCase__ = auxiliary_concat_input
UpperCAmelCase__ = loss_ignore_index
def __lowerCAmelCase ( self : Optional[Any] ):
UpperCAmelCase__ = copy.deepcopy(self.__dict__ )
UpperCAmelCase__ = self.backbone_config.to_dict()
UpperCAmelCase__ = self.__class__.model_type
return output
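
# Minimal usage sketch (relies only on the class defined above):
# config = UperNetConfig()        # falls back to the default ResNet backbone
# config_dict = config.to_dict()  # includes the serialized backbone config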
| 98 |
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1

        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)

            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
| 269 | 0 |
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process (preemptive shortest job first)."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Calculate the turn around time of each process."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """Print the average waiting time and average turn around time."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)


if __name__ == "__main__":
    print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )

    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
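
# Worked example (traced by hand through this implementation):
# (arrival, burst) = (0, 8), (1, 4), (2, 2) gives waiting times [6, 2, 0]
# and turn around times [14, 6, 2].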
| 360 |
import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel

if is_vision_available():
    from transformers import Mask2FormerImageProcessor

if is_vision_available():
    from PIL import Image


class Mask2FormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        config = Mask2FormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)

    def create_and_check_mask2former_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = Mask2FormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_mask2former_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = Mask2FormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

        comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))


@require_torch
class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": Mask2FormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = Mask2FormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Mask2FormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mask2former_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs, output_hidden_states=False)

    def test_mask2former_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = Mask2FormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_mask2former_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()

        model = Mask2FormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)


TOLERANCE = 1e-4


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def test_inference_no_head(self):
        model = Mask2FormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_universal_segmentation_head(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
| 210 | 0 |
"""simple docstring"""
import math
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of
    the squares of the first ``n`` natural numbers."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
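
# Example (hand-checked): solution(10) == 2640, since the square of the sum
# is 55**2 = 3025 and the sum of the squares is 385.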
if __name__ == "__main__":
print(F"""{solution() = }""")
| 84 |
"""simple docstring"""
def bfs(graph, s, t, parent):
    """Return True if there is a path from source ``s`` to sink ``t`` in the
    residual graph; ``parent`` is filled in to record the path."""
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def ford_fulkerson(graph, source, sink):
    """Return the maximum flow from ``source`` to ``sink`` in ``graph``."""
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
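
# For the sample graph above the printed maximum flow is 23 (this is the
# classic six-node flow-network example from CLRS).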
| 84 | 1 |
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
A : List[Any] = "CompVis/stable-diffusion-v1-1"
A : Dict = "CompVis/stable-diffusion-v1-2"
A : Optional[Any] = "CompVis/stable-diffusion-v1-3"
A : str = "CompVis/stable-diffusion-v1-4"
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , __a , __a , __a , __a , __a , __a , __a , __a = True , ):
super()._init_()
__lowerCAmelCase = StableDiffusionPipeline.from_pretrained(__a )
__lowerCAmelCase = StableDiffusionPipeline.from_pretrained(__a )
__lowerCAmelCase = StableDiffusionPipeline.from_pretrained(__a )
__lowerCAmelCase = StableDiffusionPipeline(
vae=__a , text_encoder=__a , tokenizer=__a , unet=__a , scheduler=__a , safety_checker=__a , feature_extractor=__a , requires_safety_checker=__a , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def snake_case ( self ):
return {k: getattr(self , __a ) for k in self.config.keys() if not k.startswith("_" )}
def snake_case ( self , __a = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__lowerCAmelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__a )
def snake_case ( self ):
self.enable_attention_slicing(__a )
@torch.no_grad()
def snake_case ( self , __a , __a = 5_12 , __a = 5_12 , __a = 50 , __a = 7.5 , __a = None , __a = 1 , __a = 0.0 , __a = None , __a = None , __a = "pil" , __a = True , __a = None , __a = 1 , **__a , ):
return self.pipea(
prompt=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , **__a , )
@torch.no_grad()
def snake_case ( self , __a , __a = 5_12 , __a = 5_12 , __a = 50 , __a = 7.5 , __a = None , __a = 1 , __a = 0.0 , __a = None , __a = None , __a = "pil" , __a = True , __a = None , __a = 1 , **__a , ):
return self.pipea(
prompt=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , **__a , )
@torch.no_grad()
def snake_case ( self , __a , __a = 5_12 , __a = 5_12 , __a = 50 , __a = 7.5 , __a = None , __a = 1 , __a = 0.0 , __a = None , __a = None , __a = "pil" , __a = True , __a = None , __a = 1 , **__a , ):
return self.pipea(
prompt=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , **__a , )
@torch.no_grad()
def snake_case ( self , __a , __a = 5_12 , __a = 5_12 , __a = 50 , __a = 7.5 , __a = None , __a = 1 , __a = 0.0 , __a = None , __a = None , __a = "pil" , __a = True , __a = None , __a = 1 , **__a , ):
return self.pipea(
prompt=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , **__a , )
@torch.no_grad()
def snake_case ( self , __a , __a = 5_12 , __a = 5_12 , __a = 50 , __a = 7.5 , __a = None , __a = 1 , __a = 0.0 , __a = None , __a = None , __a = "pil" , __a = True , __a = None , __a = 1 , **__a , ):
__lowerCAmelCase = "cuda" if torch.cuda.is_available() else "cpu"
self.to(__a )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}." )
# Get first result from Stable Diffusion Checkpoint v1.1
__lowerCAmelCase = self.textaimg_sda_a(
prompt=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , **__a , )
# Get first result from Stable Diffusion Checkpoint v1.2
__lowerCAmelCase = self.textaimg_sda_a(
prompt=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , **__a , )
# Get first result from Stable Diffusion Checkpoint v1.3
__lowerCAmelCase = self.textaimg_sda_a(
prompt=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , **__a , )
# Get first result from Stable Diffusion Checkpoint v1.4
__lowerCAmelCase = self.textaimg_sda_a(
prompt=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , **__a , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
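
# Hypothetical usage sketch (the custom-pipeline id is assumed; check the
# diffusers community pipelines for the exact name):
# pipe = DiffusionPipeline.from_pretrained(
#     "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
# )
# output = pipe("an astronaut riding a horse")  # four images, one per checkpoint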
| 259 |
"""simple docstring"""
import string
def atbash_slow(sequence: str) -> str:
    """Apply the Atbash cipher using ``ord``/``chr`` arithmetic."""
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    """Apply the Atbash cipher using a reversed-alphabet lookup."""
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )


def benchmark() -> None:
    """Benchmark the two implementations side by side."""
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f'''{example} encrypted in atbash: {atbash(example)}''')
benchmark()
| 259 | 1 |
'''simple docstring'''
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of ``number``, rounded to ``digit_amount``
    digits when ``digit_amount`` is positive."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
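
# Example values (approximate, since binary floats are involved):
# decimal_isolate(1.53, 0) -> 0.53 and decimal_isolate(-14.789, 3) -> -0.789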
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 97 |
'''simple docstring'''
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """Return the Hubble parameter H(z) for the given density parameters."""
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")

    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)

        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
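
# The expression above implements (comment added for clarity):
#   H(z) = H0 * sqrt(Omega_r*(1+z)^4 + Omega_m*(1+z)^3 + Omega_k*(1+z)^2 + Omega_lambda)
# where Omega_k = 1 - (Omega_m + Omega_r + Omega_lambda).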
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 298 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class _UpperCAmelCase :
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=13,__SCREAMING_SNAKE_CASE=7,__SCREAMING_SNAKE_CASE=6,__SCREAMING_SNAKE_CASE=17,__SCREAMING_SNAKE_CASE=23,__SCREAMING_SNAKE_CASE=11,__SCREAMING_SNAKE_CASE=True,):
'''simple docstring'''
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = seq_length
__lowerCAmelCase = act_dim
__lowerCAmelCase = state_dim
__lowerCAmelCase = hidden_size
__lowerCAmelCase = max_length
__lowerCAmelCase = is_training
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
__lowerCAmelCase = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
__lowerCAmelCase = floats_tensor((self.batch_size, self.seq_length, 1) )
__lowerCAmelCase = floats_tensor((self.batch_size, self.seq_length, 1) )
__lowerCAmelCase = ids_tensor((self.batch_size, self.seq_length),vocab_size=10_00 )
__lowerCAmelCase = random_attention_mask((self.batch_size, self.seq_length) )
__lowerCAmelCase = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def lowerCamelCase__ ( self ):
'''simple docstring'''
return DecisionTransformerConfig(
batch_size=self.batch_size,seq_length=self.seq_length,act_dim=self.act_dim,state_dim=self.state_dim,hidden_size=self.hidden_size,max_length=self.max_length,)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
__lowerCAmelCase = DecisionTransformerModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase = model(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.state_preds.shape,states.shape )
self.parent.assertEqual(result.action_preds.shape,actions.shape )
self.parent.assertEqual(result.return_preds.shape,returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape,(self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modalities: states, returns and actions
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.prepare_config_and_inputs()
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
) = config_and_inputs
__lowerCAmelCase = {
"""states""": states,
"""actions""": actions,
"""rewards""": rewards,
"""returns_to_go""": returns_to_go,
"""timesteps""": timesteps,
"""attention_mask""": attention_mask,
}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
a : Any =(DecisionTransformerModel,) if is_torch_available() else ()
a : Tuple =()
a : Dict ={"""feature-extraction""": DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
a : List[Any] =False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
a : Dict =False
a : str =False
a : Optional[int] =False
a : int =False
a : Tuple =False
a : List[str] =False
a : Any =False
a : Dict =False
a : List[Any] =False
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = DecisionTransformerModelTester(self )
__lowerCAmelCase = ConfigTester(self,config_class=__SCREAMING_SNAKE_CASE,hidden_size=37 )
def lowerCamelCase__ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
@slow
def lowerCamelCase__ ( self ):
'''simple docstring'''
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = DecisionTransformerModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = [
"""states""",
"""actions""",
"""rewards""",
"""returns_to_go""",
"""timesteps""",
"""attention_mask""",
]
self.assertListEqual(arg_names[: len(__SCREAMING_SNAKE_CASE )],__SCREAMING_SNAKE_CASE )
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = 2 # number of steps of autoregressive prediction we will perform
__lowerCAmelCase = 10 # defined by the RL environment, may be normalized
__lowerCAmelCase = DecisionTransformerModel.from_pretrained("""edbeeching/decision-transformer-gym-hopper-expert""" )
__lowerCAmelCase = model.to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = model.config
torch.manual_seed(0 )
__lowerCAmelCase = torch.randn(1,1,config.state_dim ).to(device=__SCREAMING_SNAKE_CASE,dtype=torch.floataa ) # env.reset()
__lowerCAmelCase = torch.tensor(
[[0.24_2793, -0.2869_3074, 0.874_2613], [0.6781_5274, -0.0810_1085, -0.1295_2147]],device=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.tensor(__SCREAMING_SNAKE_CASE,device=__SCREAMING_SNAKE_CASE,dtype=torch.floataa ).reshape(1,1,1 )
__lowerCAmelCase = state
__lowerCAmelCase = torch.zeros(1,0,config.act_dim,device=__SCREAMING_SNAKE_CASE,dtype=torch.floataa )
__lowerCAmelCase = torch.zeros(1,0,device=__SCREAMING_SNAKE_CASE,dtype=torch.floataa )
__lowerCAmelCase = torch.tensor(0,device=__SCREAMING_SNAKE_CASE,dtype=torch.long ).reshape(1,1 )
for step in range(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = torch.cat([actions, torch.zeros(1,1,config.act_dim,device=__SCREAMING_SNAKE_CASE )],dim=1 )
__lowerCAmelCase = torch.cat([rewards, torch.zeros(1,1,device=__SCREAMING_SNAKE_CASE )],dim=1 )
__lowerCAmelCase = torch.ones(1,states.shape[1] ).to(dtype=torch.long,device=states.device )
with torch.no_grad():
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = model(
states=__SCREAMING_SNAKE_CASE,actions=__SCREAMING_SNAKE_CASE,rewards=__SCREAMING_SNAKE_CASE,returns_to_go=__SCREAMING_SNAKE_CASE,timesteps=__SCREAMING_SNAKE_CASE,attention_mask=__SCREAMING_SNAKE_CASE,return_dict=__SCREAMING_SNAKE_CASE,)
self.assertEqual(action_pred.shape,actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1],expected_outputs[step],atol=1e-4 ) )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = ( # env.step(action)
torch.randn(1,1,config.state_dim ).to(device=__SCREAMING_SNAKE_CASE,dtype=torch.floataa ),
1.0,
False,
{},
)
__lowerCAmelCase = action_pred[0, -1]
__lowerCAmelCase = torch.cat([states, state],dim=1 )
__lowerCAmelCase = returns_to_go[0, -1] - reward
__lowerCAmelCase = torch.cat([returns_to_go, pred_return.reshape(1,1,1 )],dim=1 )
__lowerCAmelCase = torch.cat(
[timesteps, torch.ones((1, 1),device=__SCREAMING_SNAKE_CASE,dtype=torch.long ) * (step + 1)],dim=1 )
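# A minimal end-to-end usage sketch (a hypothetical demo, not part of the
# test suite; it reuses the checkpoint from the integration test above, and
# shapes follow the model config):
def _demo_decision_transformer() -> None:
    model = DecisionTransformerModel.from_pretrained(
        "edbeeching/decision-transformer-gym-hopper-expert"
    )
    cfg = model.config
    states = torch.randn(1, 1, cfg.state_dim)
    actions = torch.zeros(1, 1, cfg.act_dim)
    rewards = torch.zeros(1, 1)
    returns_to_go = torch.ones(1, 1, 1) * 10.0
    timesteps = torch.zeros(1, 1, dtype=torch.long)
    attention_mask = torch.ones(1, 1, dtype=torch.long)
    with torch.no_grad():
        out = model(
            states=states,
            actions=actions,
            rewards=rewards,
            returns_to_go=returns_to_go,
            timesteps=timesteps,
            attention_mask=attention_mask,
            return_dict=True,
        )
    # One action prediction per input step.
    assert out.action_preds.shape == (1, 1, cfg.act_dim)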
| 46 |
'''simple docstring'''
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=0.0,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = "geglu",__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = False,__SCREAMING_SNAKE_CASE = False,__SCREAMING_SNAKE_CASE = False,__SCREAMING_SNAKE_CASE = False,__SCREAMING_SNAKE_CASE = True,__SCREAMING_SNAKE_CASE = "layer_norm",__SCREAMING_SNAKE_CASE = False,):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = only_cross_attention
__lowerCAmelCase = (num_embeds_ada_norm is not None) and norm_type == """ada_norm_zero"""
__lowerCAmelCase = (num_embeds_ada_norm is not None) and norm_type == """ada_norm"""
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f'`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'
f' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
__lowerCAmelCase = AdaLayerNorm(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
elif self.use_ada_layer_norm_zero:
__lowerCAmelCase = AdaLayerNormZero(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = nn.LayerNorm(__SCREAMING_SNAKE_CASE,elementwise_affine=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = Attention(
query_dim=__SCREAMING_SNAKE_CASE,heads=__SCREAMING_SNAKE_CASE,dim_head=__SCREAMING_SNAKE_CASE,dropout=__SCREAMING_SNAKE_CASE,bias=__SCREAMING_SNAKE_CASE,cross_attention_dim=cross_attention_dim if only_cross_attention else None,upcast_attention=__SCREAMING_SNAKE_CASE,)
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
__lowerCAmelCase = (
AdaLayerNorm(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
if self.use_ada_layer_norm
else nn.LayerNorm(__SCREAMING_SNAKE_CASE,elementwise_affine=__SCREAMING_SNAKE_CASE )
)
__lowerCAmelCase = Attention(
query_dim=__SCREAMING_SNAKE_CASE,cross_attention_dim=cross_attention_dim if not double_self_attention else None,heads=__SCREAMING_SNAKE_CASE,dim_head=__SCREAMING_SNAKE_CASE,dropout=__SCREAMING_SNAKE_CASE,bias=__SCREAMING_SNAKE_CASE,upcast_attention=__SCREAMING_SNAKE_CASE,) # is self-attn if encoder_hidden_states is none
else:
__lowerCAmelCase = None
__lowerCAmelCase = None
# 3. Feed-forward
__lowerCAmelCase = nn.LayerNorm(__SCREAMING_SNAKE_CASE,elementwise_affine=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = FeedForward(__SCREAMING_SNAKE_CASE,dropout=__SCREAMING_SNAKE_CASE,activation_fn=__SCREAMING_SNAKE_CASE,final_dropout=__SCREAMING_SNAKE_CASE )
# let chunk size default to None
__lowerCAmelCase = None
__lowerCAmelCase = 0
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = chunk_size
__lowerCAmelCase = dim
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = None,):
'''simple docstring'''
if self.use_ada_layer_norm:
__lowerCAmelCase = self.norma(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
elif self.use_ada_layer_norm_zero:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.norma(
__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,hidden_dtype=hidden_states.dtype )
else:
__lowerCAmelCase = self.norma(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = cross_attention_kwargs if cross_attention_kwargs is not None else {}
__lowerCAmelCase = self.attna(
__SCREAMING_SNAKE_CASE,encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,attention_mask=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE,)
if self.use_ada_layer_norm_zero:
__lowerCAmelCase = gate_msa.unsqueeze(1 ) * attn_output
__lowerCAmelCase = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
__lowerCAmelCase = (
self.norma(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) if self.use_ada_layer_norm else self.norma(__SCREAMING_SNAKE_CASE )
)
__lowerCAmelCase = self.attna(
__SCREAMING_SNAKE_CASE,encoder_hidden_states=__SCREAMING_SNAKE_CASE,attention_mask=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE,)
__lowerCAmelCase = attn_output + hidden_states
# 3. Feed-forward
__lowerCAmelCase = self.norma(__SCREAMING_SNAKE_CASE )
if self.use_ada_layer_norm_zero:
__lowerCAmelCase = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f'`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.' )
__lowerCAmelCase = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
__lowerCAmelCase = torch.cat(
[self.ff(__SCREAMING_SNAKE_CASE ) for hid_slice in norm_hidden_states.chunk(__SCREAMING_SNAKE_CASE,dim=self._chunk_dim )],dim=self._chunk_dim,)
else:
__lowerCAmelCase = self.ff(__SCREAMING_SNAKE_CASE )
if self.use_ada_layer_norm_zero:
__lowerCAmelCase = gate_mlp.unsqueeze(1 ) * ff_output
__lowerCAmelCase = ff_output + hidden_states
return hidden_states
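# A standalone sketch of the feed-forward chunking used above: split the
# token axis, run the MLP per chunk, and concatenate; the result matches the
# unchunked pass while lowering peak activation memory (toy sizes, not
# library code):
def _demo_chunked_feed_forward() -> None:
    ff = nn.Sequential(nn.Linear(8, 32), nn.GELU(), nn.Linear(32, 8))
    hidden_states = torch.randn(2, 6, 8)  # (batch, tokens, dim)
    chunk_size, chunk_dim = 2, 1
    num_chunks = hidden_states.shape[chunk_dim] // chunk_size
    chunked = torch.cat(
        [ff(h) for h in hidden_states.chunk(num_chunks, dim=chunk_dim)],
        dim=chunk_dim,
    )
    assert torch.allclose(chunked, ff(hidden_states), atol=1e-6)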
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = 4,__SCREAMING_SNAKE_CASE = 0.0,__SCREAMING_SNAKE_CASE = "geglu",__SCREAMING_SNAKE_CASE = False,):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = int(dim * mult )
__lowerCAmelCase = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
__lowerCAmelCase = GELU(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
if activation_fn == "gelu-approximate":
__lowerCAmelCase = GELU(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,approximate="""tanh""" )
elif activation_fn == "geglu":
__lowerCAmelCase = GEGLU(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
elif activation_fn == "geglu-approximate":
__lowerCAmelCase = ApproximateGELU(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = nn.ModuleList([] )
# project in
self.net.append(__SCREAMING_SNAKE_CASE )
# project dropout
self.net.append(nn.Dropout(__SCREAMING_SNAKE_CASE ) )
# project out
self.net.append(nn.Linear(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(__SCREAMING_SNAKE_CASE ) )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
for module in self.net:
__lowerCAmelCase = module(__SCREAMING_SNAKE_CASE )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = "none" ):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = nn.Linear(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = approximate
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(__SCREAMING_SNAKE_CASE,approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ),approximate=self.approximate ).to(dtype=gate.dtype )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = self.proj(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.gelu(__SCREAMING_SNAKE_CASE )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = nn.Linear(__SCREAMING_SNAKE_CASE,dim_out * 2 )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(__SCREAMING_SNAKE_CASE )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = self.proj(__SCREAMING_SNAKE_CASE ).chunk(2,dim=-1 )
return hidden_states * self.gelu(__SCREAMING_SNAKE_CASE )
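# A small standalone sketch of the GEGLU gating above: the projection doubles
# the width, one half becomes the value and the other the gate (toy sizes,
# not library code):
def _demo_geglu() -> None:
    dim, dim_out = 8, 16
    proj = nn.Linear(dim, dim_out * 2)
    x = torch.randn(2, dim)
    hidden_states, gate = proj(x).chunk(2, dim=-1)
    out = hidden_states * F.gelu(gate)
    assert out.shape == (2, dim_out)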
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = nn.Linear(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = self.proj(__SCREAMING_SNAKE_CASE )
return x * torch.sigmoid(1.702 * x )
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = nn.Embedding(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = nn.SiLU()
__lowerCAmelCase = nn.Linear(__SCREAMING_SNAKE_CASE,embedding_dim * 2 )
__lowerCAmelCase = nn.LayerNorm(__SCREAMING_SNAKE_CASE,elementwise_affine=__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = self.linear(self.silu(self.emb(__SCREAMING_SNAKE_CASE ) ) )
__lowerCAmelCase , __lowerCAmelCase = torch.chunk(__SCREAMING_SNAKE_CASE,2 )
__lowerCAmelCase = self.norm(__SCREAMING_SNAKE_CASE ) * (1 + scale) + shift
return x
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = CombinedTimestepLabelEmbeddings(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = nn.SiLU()
__lowerCAmelCase = nn.Linear(__SCREAMING_SNAKE_CASE,6 * embedding_dim,bias=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = nn.LayerNorm(__SCREAMING_SNAKE_CASE,elementwise_affine=__SCREAMING_SNAKE_CASE,eps=1e-6 )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
__lowerCAmelCase = self.linear(self.silu(self.emb(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,hidden_dtype=__SCREAMING_SNAKE_CASE ) ) )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = emb.chunk(6,dim=1 )
__lowerCAmelCase = self.norm(__SCREAMING_SNAKE_CASE ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = 1e-5 ):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = num_groups
__lowerCAmelCase = eps
if act_fn is None:
__lowerCAmelCase = None
else:
__lowerCAmelCase = get_activation(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = nn.Linear(__SCREAMING_SNAKE_CASE,out_dim * 2 )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if self.act:
__lowerCAmelCase = self.act(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.linear(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = emb[:, :, None, None]
__lowerCAmelCase , __lowerCAmelCase = emb.chunk(2,dim=1 )
__lowerCAmelCase = F.group_norm(__SCREAMING_SNAKE_CASE,self.num_groups,eps=self.eps )
__lowerCAmelCase = x * (1 + scale) + shift
return x
| 46 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCamelCase ( _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_A : Union[str, Any] = BlenderbotSmallTokenizer
_A : List[Any] = False
def lowerCAmelCase_ ( self: Any ) -> Dict:
super().setUp()
snake_case_ :Dict = ["""__start__""", """adapt""", """act""", """ap@@""", """te""", """__end__""", """__unk__"""]
snake_case_ :Dict = dict(zip(snake_case , range(len(snake_case ) ) ) )
snake_case_ :Union[str, Any] = ["""#version: 0.2""", """a p""", """t e</w>""", """ap t</w>""", """a d""", """ad apt</w>""", """a c""", """ac t</w>""", """"""]
snake_case_ :Dict = {"""unk_token""": """__unk__""", """bos_token""": """__start__""", """eos_token""": """__end__"""}
snake_case_ :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case_ :str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(snake_case ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(snake_case ) )
def lowerCAmelCase_ ( self: List[Any] , **snake_case: Optional[int] ) -> str:
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **snake_case )
def lowerCAmelCase_ ( self: Any , snake_case: Optional[Any] ) -> Any:
snake_case_ :Any = """adapt act apte"""
snake_case_ :int = """adapt act apte"""
return input_text, output_text
def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[int]:
snake_case_ :Any = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case_ :Dict = """adapt act apte"""
snake_case_ :Optional[Any] = ["""adapt""", """act""", """ap@@""", """te"""]
snake_case_ :List[str] = tokenizer.tokenize(snake_case )
self.assertListEqual(snake_case , snake_case )
snake_case_ :int = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
snake_case_ :str = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case ) , snake_case )
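    # For reference, how the toy merges above resolve "apte" (standard BPE
    # with the ranks defined in setUp): "a p t e</w>" -> "ap t e</w>" via the
    # top-ranked merge "a p", then "ap te</w>" via "t e</w>"; the non-final
    # piece serializes with the "@@" continuation marker, giving
    # ["ap@@", "te"].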
def lowerCAmelCase_ ( self: Optional[Any] ) -> Union[str, Any]:
snake_case_ :List[Any] = BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" )
assert tok("""sam""" ).input_ids == [1_384]
snake_case_ :str = """I am a small frog."""
snake_case_ :Dict = tok([src_text] , padding=snake_case , truncation=snake_case )["""input_ids"""]
snake_case_ :Optional[Any] = tok.batch_decode(snake_case , skip_special_tokens=snake_case , clean_up_tokenization_spaces=snake_case )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def lowerCAmelCase_ ( self: str ) -> str:
snake_case_ :Optional[int] = BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" )
snake_case_ :str = """I am a small frog ."""
snake_case_ :Dict = """."""
snake_case_ :int = tok(snake_case )["""input_ids"""]
snake_case_ :List[Any] = tok(snake_case )["""input_ids"""]
assert encoded[-1] == encoded_dot[0]
| 66 |
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
a_ : Any = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase__ )
class __UpperCamelCase ( lowerCamelCase__ ):
def __init__( self, **lowerCAmelCase ):
"""simple docstring"""
super().__init__(**lowerCAmelCase )
if self.framework != "pt":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
# No specific FOR_XXX available yet
def __call__( self, lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return super().__call__(lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, **lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ ={}
if "candidate_labels" in kwargs:
lowerCamelCase_ =kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
lowerCamelCase_ =kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None, lowerCAmelCase="This is a sound of {}." ):
"""simple docstring"""
if isinstance(lowerCAmelCase, lowerCAmelCase ):
if audio.startswith('''http://''' ) or audio.startswith('''https://''' ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
lowerCamelCase_ =requests.get(lowerCAmelCase ).content
else:
with open(lowerCAmelCase, '''rb''' ) as f:
lowerCamelCase_ =f.read()
if isinstance(lowerCAmelCase, lowerCAmelCase ):
lowerCamelCase_ =ffmpeg_read(lowerCAmelCase, self.feature_extractor.sampling_rate )
if not isinstance(lowerCAmelCase, np.ndarray ):
raise ValueError('''We expect a numpy ndarray as input''' )
if len(audio.shape ) != 1:
raise ValueError('''We expect a single channel audio input for ZeroShotAudioClassificationPipeline''' )
lowerCamelCase_ =self.feature_extractor(
[audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors='''pt''' )
lowerCamelCase_ =candidate_labels
lowerCamelCase_ =[hypothesis_template.format(lowerCAmelCase ) for x in candidate_labels]
lowerCamelCase_ =self.tokenizer(lowerCAmelCase, return_tensors=self.framework, padding=lowerCAmelCase )
lowerCamelCase_ =[text_inputs]
return inputs
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =model_inputs.pop('''candidate_labels''' )
lowerCamelCase_ =model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0], lowerCAmelCase ):
lowerCamelCase_ =text_inputs[0]
else:
# Batching case.
lowerCamelCase_ =text_inputs[0][0]
lowerCamelCase_ =self.model(**lowerCAmelCase, **lowerCAmelCase )
lowerCamelCase_ ={
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_audio,
}
return model_outputs
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =model_outputs.pop('''candidate_labels''' )
lowerCamelCase_ =model_outputs['''logits'''][0]
if self.framework == "pt":
lowerCamelCase_ =logits.softmax(dim=0 )
lowerCamelCase_ =probs.tolist()
else:
raise ValueError('''`tf` framework not supported.''' )
lowerCamelCase_ =[
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(lowerCAmelCase, lowerCAmelCase ), key=lambda x : -x[0] )
]
return result
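# A standalone sketch of the postprocessing above: softmax the per-label
# logits and sort descending (hypothetical numbers, runnable without a
# model):
def _demo_zero_shot_postprocess() -> None:
    import torch

    logits = torch.tensor([2.0, 0.5, -1.0])
    candidate_labels = ["dog barking", "vacuum cleaner", "silence"]
    probs = logits.softmax(dim=0).tolist()
    result = [
        {"score": score, "label": label}
        for score, label in sorted(zip(probs, candidate_labels), key=lambda x: -x[0])
    ]
    assert result[0]["label"] == "dog barking"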
| 75 | 0 |
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class lowerCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
def __init__( self , lowerCAmelCase__=0.01 , lowerCAmelCase__=1_000 ) -> Dict:
SCREAMING_SNAKE_CASE = p_stop
SCREAMING_SNAKE_CASE = max_length
def __iter__( self ) -> List[Any]:
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = False
while not stop and count < self.max_length:
yield count
count += 1
SCREAMING_SNAKE_CASE = random.random() < self.p_stop
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False , lowerCAmelCase__=True ) -> List[str]:
SCREAMING_SNAKE_CASE = [
BatchSamplerShard(lowerCAmelCase__ , 2 , lowerCAmelCase__ , split_batches=lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
for i in range(2 )
]
SCREAMING_SNAKE_CASE = [list(lowerCAmelCase__ ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(lowerCAmelCase__ ) for shard in batch_sampler_shards] , [len(lowerCAmelCase__ ) for e in expected] )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __A ( self ) -> str:
# Check the shards when the dataset is a round multiple of total batch size.
SCREAMING_SNAKE_CASE = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
SCREAMING_SNAKE_CASE = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
SCREAMING_SNAKE_CASE = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ )
# Check the shards when the dataset is not a round multiple of batch size and does not have a multiple of
# num_processes batch.
SCREAMING_SNAKE_CASE = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ )
# Check the shards when the dataset is very small.
SCREAMING_SNAKE_CASE = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [[], []]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ )
def __A ( self ) -> str:
# Check the shards when the dataset is a round multiple of batch size.
SCREAMING_SNAKE_CASE = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
# Check the shards when the dataset is not a round multiple of batch size.
SCREAMING_SNAKE_CASE = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
SCREAMING_SNAKE_CASE = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
# Check the shards when the dataset is very small.
SCREAMING_SNAKE_CASE = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [[], []]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
def __A ( self ) -> Union[str, Any]:
# Check the shards when the dataset is a round multiple of total batch size.
SCREAMING_SNAKE_CASE = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
SCREAMING_SNAKE_CASE = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
SCREAMING_SNAKE_CASE = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
# Check the shards when the dataset is not a round multiple of batch size and does not have a multiple of
# num_processes batch.
SCREAMING_SNAKE_CASE = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
# Check the shards when the dataset is very small.
SCREAMING_SNAKE_CASE = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [[[0, 1]], []]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [[], []]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
def __A ( self ) -> Optional[int]:
# Check the shards when the dataset is a round multiple of batch size.
SCREAMING_SNAKE_CASE = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
# Check the shards when the dataset is not a round multiple of batch size.
SCREAMING_SNAKE_CASE = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
SCREAMING_SNAKE_CASE = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
# Check the shards when the dataset is very small.
SCREAMING_SNAKE_CASE = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [[[0, 1]], []]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [[], []]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
def __A ( self ) -> Dict:
SCREAMING_SNAKE_CASE = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
SCREAMING_SNAKE_CASE = [BatchSamplerShard(lowerCAmelCase__ , 2 , lowerCAmelCase__ , even_batches=lowerCAmelCase__ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False , lowerCAmelCase__=2 , lowerCAmelCase__=False ) -> List[str]:
random.seed(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = list(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
IterableDatasetShard(
lowerCAmelCase__ , batch_size=lowerCAmelCase__ , drop_last=lowerCAmelCase__ , num_processes=lowerCAmelCase__ , process_index=lowerCAmelCase__ , split_batches=lowerCAmelCase__ , )
for i in range(lowerCAmelCase__ )
]
SCREAMING_SNAKE_CASE = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(lowerCAmelCase__ )
iterable_dataset_lists.append(list(lowerCAmelCase__ ) )
SCREAMING_SNAKE_CASE = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shards should have the same length, a round multiple of shard_batch_size
SCREAMING_SNAKE_CASE = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) )
self.assertTrue(len(lowerCAmelCase__ ) % shard_batch_size == 0 )
SCREAMING_SNAKE_CASE = []
for idx in range(0 , len(lowerCAmelCase__ ) , lowerCAmelCase__ ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(lowerCAmelCase__ ) < len(lowerCAmelCase__ ):
reference += reference
self.assertListEqual(lowerCAmelCase__ , reference[: len(lowerCAmelCase__ )] )
def __A ( self ) -> Tuple:
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = RandomIterableDataset()
self.check_iterable_dataset_shards(lowerCAmelCase__ , lowerCAmelCase__ , batch_size=4 , drop_last=lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
self.check_iterable_dataset_shards(lowerCAmelCase__ , lowerCAmelCase__ , batch_size=4 , drop_last=lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
self.check_iterable_dataset_shards(lowerCAmelCase__ , lowerCAmelCase__ , batch_size=4 , drop_last=lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
self.check_iterable_dataset_shards(lowerCAmelCase__ , lowerCAmelCase__ , batch_size=4 , drop_last=lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
# Edge case with a very small dataset
SCREAMING_SNAKE_CASE = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(lowerCAmelCase__ , lowerCAmelCase__ , batch_size=4 , drop_last=lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
self.check_iterable_dataset_shards(lowerCAmelCase__ , lowerCAmelCase__ , batch_size=4 , drop_last=lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
self.check_iterable_dataset_shards(lowerCAmelCase__ , lowerCAmelCase__ , batch_size=4 , drop_last=lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
self.check_iterable_dataset_shards(lowerCAmelCase__ , lowerCAmelCase__ , batch_size=4 , drop_last=lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
def __A ( self ) -> Dict:
SCREAMING_SNAKE_CASE = BatchSampler(range(16 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = SkipBatchSampler(lowerCAmelCase__ , 2 )
self.assertListEqual(list(lowerCAmelCase__ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def __A ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def __A ( self ) -> List[str]:
SCREAMING_SNAKE_CASE = DataLoader(list(range(16 ) ) , batch_size=4 )
SCREAMING_SNAKE_CASE = skip_first_batches(lowerCAmelCase__ , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def __A ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(lowerCAmelCase__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(lowerCAmelCase__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def __A ( self ) -> Dict:
Accelerator()
SCREAMING_SNAKE_CASE = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(lowerCAmelCase__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(lowerCAmelCase__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
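# A compact standalone sketch of the sharding behaviour exercised above:
# with split_batches=False (the default) the two processes take alternating
# whole batches of the wrapped sampler (toy sizes, mirrors the tests):
def _demo_batch_sampler_shard() -> None:
    sampler = BatchSampler(range(8), batch_size=2, drop_last=False)
    shards = [BatchSamplerShard(sampler, 2, i) for i in range(2)]
    assert [list(s) for s in shards] == [[[0, 1], [4, 5]], [[2, 3], [6, 7]]]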
| 38 |
"""simple docstring"""
class MaxFenwickTree:
    '''simple docstring'''

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                # Recompute this node from the new value and the rest of its range.
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # Because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
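# A quick usage sketch of the structure above (0-indexed; query is the max
# over the half-open range [left, right)):
def _demo_max_fenwick_tree() -> None:
    tree = MaxFenwickTree(5)
    tree.update(0, 3)
    tree.update(2, 7)
    tree.update(4, 1)
    assert tree.query(0, 5) == 7
    assert tree.query(3, 5) == 1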
if __name__ == "__main__":
import doctest
doctest.testmod()
| 38 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a : List[str] = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[int] = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : str = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Union[str, Any] = [
'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XGLMForCausalLM',
'XGLMModel',
'XGLMPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[Any] = [
'FlaxXGLMForCausalLM',
'FlaxXGLMModel',
'FlaxXGLMPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : int = [
'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXGLMForCausalLM',
'TFXGLMModel',
'TFXGLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
a : Any = _LazyModule(__name__, globals()["__file__"], _import_structure)
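# For reference, a minimal standalone sketch of the lazy-module pattern used
# above: attributes resolve to real imports only on first access (a toy, not
# the transformers _LazyModule implementation):
import importlib
import types


class _LazyDemoModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, item):
        # Called only when normal attribute lookup fails.
        for module_name, names in self._import_structure.items():
            if item in names:
                return getattr(importlib.import_module(module_name), item)
        raise AttributeError(item)


def _demo_lazy_module() -> None:
    mod = _LazyDemoModule("demo", {"json": ["dumps"]})
    assert mod.dumps({"a": 1}) == '{"a": 1}'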
| 114 |
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
UpperCAmelCase__ : List[str] = logging.get_logger(__name__)
@add_end_docstrings(a__ )
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
def __init__(self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Tuple:
"""simple docstring"""
super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
requires_backends(self , """vision""" )
self.check_model_type(SCREAMING_SNAKE_CASE__ )
def __call__(self , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> List[str]:
"""simple docstring"""
return super().__call__(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , **SCREAMING_SNAKE_CASE__ ) -> Any:
"""simple docstring"""
return {}, {}, {}
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = load_image(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = image.size
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors=self.framework )
return model_inputs
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.model(**SCREAMING_SNAKE_CASE__ )
return model_outputs
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = model_outputs.predicted_depth
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="""bicubic""" , align_corners=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = prediction.squeeze().cpu().numpy()
SCREAMING_SNAKE_CASE__ : Any = (output * 2_55 / np.max(SCREAMING_SNAKE_CASE__ )).astype("""uint8""" )
SCREAMING_SNAKE_CASE__ : List[str] = Image.fromarray(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = {}
SCREAMING_SNAKE_CASE__ : Any = predicted_depth
SCREAMING_SNAKE_CASE__ : Dict = depth
return output_dict
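# A standalone sketch of the postprocessing above: upsample the raw depth map
# back to the input size and rescale it to uint8 (a random tensor stands in
# for a real model prediction):
def _demo_depth_postprocess() -> None:
    predicted_depth = torch.rand(1, 24, 32)
    image_size = (64, 48)  # PIL convention: (width, height)
    prediction = torch.nn.functional.interpolate(
        predicted_depth.unsqueeze(1), size=image_size[::-1], mode="bicubic", align_corners=False
    )
    output = prediction.squeeze().cpu().numpy()
    formatted = (output * 255 / np.max(output)).astype("uint8")
    depth = Image.fromarray(formatted)
    assert depth.size == image_size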
| 25 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_lowercase : Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name
_lowercase : str = """
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def downscale_height_and_width(height: int, width: int, scale_factor: int = 8):
    '''simple docstring'''
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
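# A worked check of the helper (a sketch; 8 is the movq latent scale factor
# of the default Kandinsky 2.2 decoder): requested sizes are rounded up to
# whole latent cells of scale_factor**2 pixels.
def _demo_downscale() -> None:
    assert downscale_height_and_width(512, 512, 8) == (64, 64)
    assert downscale_height_and_width(500, 512, 8) == (64, 64)  # 500 rounds up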
class UpperCamelCase__( lowerCAmelCase ):
def __init__( self : Union[str, Any] , lowerCAmelCase : UNetaDConditionModel , lowerCAmelCase : DDPMScheduler , lowerCAmelCase : VQModel , )-> List[str]:
"""simple docstring"""
super().__init__()
self.register_modules(
unet=lowerCAmelCase , scheduler=lowerCAmelCase , movq=lowerCAmelCase , )
UpperCAmelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def a__( self : int , lowerCAmelCase : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : int , lowerCAmelCase : Any , lowerCAmelCase : Tuple , lowerCAmelCase : str )-> str:
"""simple docstring"""
if latents is None:
UpperCAmelCase = randn_tensor(lowerCAmelCase , generator=lowerCAmelCase , device=lowerCAmelCase , dtype=lowerCAmelCase )
else:
if latents.shape != shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
UpperCAmelCase = latents.to(lowerCAmelCase )
UpperCAmelCase = latents * scheduler.init_noise_sigma
return latents
def a__( self : Dict , lowerCAmelCase : Union[str, Any]=0 )-> Optional[Any]:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
UpperCAmelCase = torch.device(F"""cuda:{gpu_id}""" )
UpperCAmelCase = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCAmelCase , lowerCAmelCase )
def a__( self : str , lowerCAmelCase : Union[str, Any]=0 )-> List[str]:
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
UpperCAmelCase = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=lowerCAmelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCAmelCase , UpperCAmelCase = cpu_offload_with_hook(lowerCAmelCase , lowerCAmelCase , prev_module_hook=lowerCAmelCase )
# We'll offload the last model manually.
UpperCAmelCase = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def a__( self : int )-> Union[str, Any]:
"""simple docstring"""
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCAmelCase , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler, )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {'''image_embeds''': image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, '''variance_type''')
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator, )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)['''sample''']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
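# A quick numeric sanity check of the classifier-free-guidance update used in
# `__call__` above (uncond + scale * (text - uncond)); the one-element tensors
# are toy stand-ins for real UNet outputs, not part of the pipeline:
#
#   >>> import torch
#   >>> uncond, text = torch.tensor([0.1]), torch.tensor([0.3])
#   >>> round(float(uncond + 4.0 * (text - uncond)), 4)
#   0.9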
| 369 |
'''simple docstring'''
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=in_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block)
            attn_block = FlaxTransformeraDModel(
                in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_a = FlaxDownsampleaD(self.out_channels , dtype=self.dtype)
    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()
        for resnet, attn in zip(self.resnets , self.attentions):
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic)
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_a(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
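# `output_states` mirrors the PyTorch UNet down blocks: every intermediate
# hidden state is stashed so the matching up block can pop it back off and
# concatenate it along the channel axis (see `res_hidden_states_tuple` in the
# up blocks below).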
class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=in_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_a = FlaxDownsampleaD(self.out_channels , dtype=self.dtype)
    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_a(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block)
            attn_block = FlaxTransformeraDModel(
                in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels , dtype=self.dtype)
    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets , self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1)
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic)
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_a(hidden_states)
        return hidden_states
class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels , dtype=self.dtype)
    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1)
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_a(hidden_states)
        return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlockaD(
                in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
        ]
        attentions = []
        for _ in range(self.num_layers):
            attn_block = FlaxTransformeraDModel(
                in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block)
            res_block = FlaxResnetBlockaD(
                in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block)
        self.resnets = resnets
        self.attentions = attentions
    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states , temb)
        for attn, resnet in zip(self.attentions , self.resnets[1:]):
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic)
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic)
        return hidden_states
| 91 | 0 |
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the paths from the top-left to the bottom-right cell of a 0/1
    grid (1 = blocked), moving in the four cardinal directions without
    revisiting a cell."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col))
    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)
    visit.remove((row, col))
    return count
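# A small sanity check (hypothetical 3x3 grid; the 1 marks the blocked centre
# cell, leaving exactly the top-right and bottom-left routes):
#
#   >>> depth_first_search([[0, 0, 0], [0, 1, 0], [0, 0, 0]], 0, 0, set())
#   2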
if __name__ == "__main__":
import doctest
doctest.testmod()
| 12 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=2, seq_length=56, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=7, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_12, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, attention_type="block_sparse", use_bias=True, rescale_embeddings=False, block_size=2, num_random_blocks=3, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        config = BigBirdConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """token_type_ids""": token_type_ids,
            """attention_mask""": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
    test_attn_probs = False
    test_mismatched_shapes = False
    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""google/bigbird-roberta-base""")
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids , attention_mask=attention_mask , **kwargs)

                with self.subTest("""JIT Enabled"""):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("""JIT Disabled"""):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs) , len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs , outputs):
                    self.assertEqual(jitted_output.shape , output.shape)
    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1E-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in the PyTorch version
        # an effort was made to return `attention_probs` (yet to be verified).
        if name.startswith("""outputs.attentions"""):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs , pt_outputs , model_class , tol , name , attributes)
| 12 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
_a = logging.get_logger(__name__)
class LayoutLMvaFeatureExtractor(LayoutLMvaImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning, )
        super().__init__(*args, **kwargs)
| 144 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params
    def __len__(self):
        return len(self.dataset)
    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating batch unrolling altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size
        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None
    def __len__(self):
        return len(self.loader)
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self
    def loader_batch_item(self):
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is a simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # like batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result
    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()
        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)
    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self
    def __next__(self):
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and have been iterated against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self
    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # hits an `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator
        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator
class KeyDataset(Dataset):
    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key
    def __len__(self):
        return len(self.dataset)
    def __getitem__(self, i):
        return self.dataset[i][self.key]
class KeyPairDataset(Dataset):
    def __init__(self, dataset: Dataset, keya: str, keyb: str):
        self.dataset = dataset
        self.keya = keya
        self.keyb = keyb
    def __len__(self):
        return len(self.dataset)
    def __getitem__(self, i):
        return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keyb]}
| 144 | 1 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
"""simple docstring"""
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
        return config, pixel_values
    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values , **kwargs)

                with self.subTest('JIT Enabled'):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest('JIT Disabled'):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs) , len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs , outputs):
                    self.assertEqual(jitted_output.shape , output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('google/vit-base-patch16-224')
            outputs = model(np.ones((1, 3, 2_24, 2_24)))
            self.assertIsNotNone(outputs)
| 78 |
def solution(limit: int = 5_0_0_0_0_0_0_0) -> int:
    """Project Euler 87: count the numbers below `limit` expressible as the
    sum of a prime square, a prime cube and a prime fourth power."""
    ret = set()
    prime_square_limit = int((limit - 2_4) ** (1 / 2))
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))
    for prime_a in primes:
        square = prime_a * prime_a
        for prime_b in primes:
            cube = prime_b * prime_b * prime_b
            if square + cube >= limit - 1_6:
                break
            for prime_c in primes:
                tetr = prime_c * prime_c * prime_c * prime_c
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)
    return len(ret)
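# Per the problem statement there are exactly four such numbers below fifty
# (28, 33, 47 and 49), which gives a cheap regression check:
#
#   >>> solution(50)
#   4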
if __name__ == "__main__":
print(F"""{solution() = }""")
| 216 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_lilt'] = [
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 28 |
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class Test(unittest.TestCase):
    def test_component(self):
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()
    def test_str(self):
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), """(0,0,0,0,0,1)""")
    def test_size(self):
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)
    def test_euclidean_length(self):
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)
    def test_add(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)
    def test_sub(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)
    def test_mul(self):
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), """(3.0,6.0,9.0)""")
        self.assertEqual((a * b), 0)
    def test_zero_vector(self):
        self.assertEqual(str(zero_vector(10)).count("""0"""), 10)
    def test_unit_basis_vector(self):
        self.assertEqual(str(unit_basis_vector(3, 1)), """(0,1,0)""")
    def test_axpy(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), """(3,4,7)""")
    def test_copy(self):
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))
    def test_change_component(self):
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), """(0,1,0)""")
    def test_str_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""", str(a))
    def test_minor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))
    def test_cofactor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))
    def test_determinant(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())
    def test_mul_matrix(self):
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("""(14,32,50)""", str(a * x))
        self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""", str(a * 2))
    def test_change_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""", str(a))
    def test_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)
    def test_add_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""", str(a + b))
    def test_sub_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""", str(a - b))
    def test_square_zero_matrix(self):
        self.assertEqual(
            """|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""", str(square_zero_matrix(5)), )
if __name__ == "__main__":
unittest.main()
| 28 | 1 |
import warnings
from ..trainer import Trainer
from ..utils import logging
__A : Any = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs)-> Optional[int]:
        warnings.warn(
            """`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
            """instead.""", FutureWarning, )
        super().__init__(args=args, **kwargs)
| 154 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
__A : Dict = logging.get_logger(__name__)
def get_mobilenet_va_config(model_name):
    """Build a MobileNetV1 config from a model name of the form
    `mobilenet_v1_<depth>_<size>`."""
    config = MobileNetVaConfig(layer_norm_eps=0.0_0_1)
    if "_quant" in model_name:
        raise ValueError("""Quantized models are not supported.""")
    matches = re.match(R"""^mobilenet_v1_([^_]*)_([^_]*)$""", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = """imagenet-1k-id2label.json"""
    repo_id = """huggingface/label-files"""
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
    idalabel = {int(k) + 1: v for k, v in idalabel.items()}
    idalabel[0] = """background"""
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
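# A quick illustration of the name parsing above (model names follow the
# pattern `mobilenet_v1_<depth>_<size>`): for "mobilenet_v1_0.75_192" the
# regex yields depth_multiplier = 0.75 and image_size = 192.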
def prepare_img():
    """Fetch a standard COCO test image to verify the converted model on."""
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def __UpperCamelCase ( _A : List[Any] , _A : Any , _A : str , _A : int=False ) ->List[str]:
"""simple docstring"""
lowerCamelCase_ =get_mobilenet_va_config(_A )
# Load 🤗 model
lowerCamelCase_ =MobileNetVaForImageClassification(_A ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(_A , _A , _A )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
lowerCamelCase_ =MobileNetVaImageProcessor(
crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 32} , )
lowerCamelCase_ =image_processor(images=prepare_img() , return_tensors="""pt""" )
lowerCamelCase_ =model(**_A )
lowerCamelCase_ =outputs.logits
assert logits.shape == (1, 1001)
if model_name == "mobilenet_v1_1.0_224":
lowerCamelCase_ =torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] )
elif model_name == "mobilenet_v1_0.75_192":
lowerCamelCase_ =torch.tensor([-3.9_4_4_0, -2.3_1_4_1, -0.3_3_3_3] )
else:
lowerCamelCase_ =None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , _A , atol=1E-4 )
Path(_A ).mkdir(exist_ok=_A )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(_A )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(_A )
if push_to_hub:
print("""Pushing to the hub...""" )
lowerCamelCase_ ="""google/""" + model_name
image_processor.push_to_hub(_A )
model.push_to_hub(_A )
if __name__ == "__main__":
__A : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='mobilenet_v1_1.0_224',
type=str,
help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.',
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__A : List[str] = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 154 | 1 |
'''simple docstring'''
def sum_of_digits(n: int) -> int:
    """Iteratively sum the decimal digits of `n` (sign is ignored)."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res
def sum_of_digits_recursion(n: int) -> int:
    """Recursive variant of `sum_of_digits`."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)
def sum_of_digits_compact(n: int) -> int:
    """One-liner variant of `sum_of_digits` via string conversion."""
    return sum(int(c) for c in str(abs(n)))
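# A couple of quick checks that all three variants agree:
#
#   >>> sum_of_digits(12345)
#   15
#   >>> sum_of_digits_recursion(-123)
#   6
#   >>> sum_of_digits_compact(0)
#   0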
def benchmark() -> None:
    """Time the three implementations against a few increasingly large inputs."""
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable, value: int) -> None:
        call = F'''{func.__name__}({value})'''
        timing = timeit(F'''__main__.{call}''', setup='''import __main__''')
        print(F'''{call:56} = {func(value)} -- {timing:.4f} seconds''')
    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 264 |
'''simple docstring'''
def excel_title_to_column(column_title: str) -> int:
    """Convert an Excel-style column title (e.g. "AB") to its column number,
    treating the title as a base-26 numeral with A=1 .. Z=26."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer
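# Quick sanity checks ("A" is column 1, so "AB" = 26*1 + 2 and "ZZ" = 26*26 + 26):
#
#   >>> excel_title_to_column("A")
#   1
#   >>> excel_title_to_column("AB")
#   28
#   >>> excel_title_to_column("ZZ")
#   702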
if __name__ == "__main__":
from doctest import testmod
testmod()
| 264 | 1 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''')
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=False)
        self.tokenizer = tokenizer
    def test_convert_token_and_id(self):
        token = '''<pad>'''
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token) , token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id) , token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0] , '''<s>''')
        self.assertEqual(vocab_keys[1] , '''<pad>''')
        self.assertEqual(vocab_keys[-1] , '''<mask>''')
        self.assertEqual(len(vocab_keys) , 101122)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size , 101122)
    @require_torch
    def test_prepare_batch(self):
        src_text = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]
        batch = self.tokenizer(
            src_text , max_length=len(expected_src_tokens) , padding=True , truncation=True , return_tensors='''pt''')
        self.assertIsInstance(batch , BatchEncoding)
        self.assertEqual((2, 6) , batch.input_ids.shape)
        self.assertEqual((2, 6) , batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens , result)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''I was born in 92000, and this is falsé.'''
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens , rust_tokens)
        ids = tokenizer.encode(sequence , add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False)
        self.assertListEqual(ids , rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids , rust_ids)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase_ = {'''input_ids''': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            '''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
            '''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
            '''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
            '''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
            '''telles que la traduction et la synthèse de texte.''',
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase_ , model_name='''moussaKam/mbarthez''' , revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' , sequences=sequences , )
| 51 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image: Image) -> str:
    """Return the MD5 hex digest of an image's raw bytes."""
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __snake_case ( unittest.TestCase ):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model , image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
        self.assertEqual({'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)} , outputs)
        import datasets
        dataset = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''')
        outputs = depth_estimator(
            [
                Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png'''),
                '''http://images.cocodataset.org/val2017/000000039769.jpg''',
                # RGBA
                dataset[0]['''file'''],
                # LA
                dataset[1]['''file'''],
                # L
                dataset[2]['''file'''],
            ])
        self.assertEqual(
            [
                {'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
                {'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
                {'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
                {'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
                {'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
            ] , outputs , )
    @require_tf
    @unittest.skip('''Depth estimation is not implemented in TF''')
    def test_small_model_tf(self):
        pass
    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = '''Intel/dpt-large'''
        depth_estimator = pipeline('''depth-estimation''' , model=model_id)
        outputs = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''')
        outputs['''depth'''] = hashimage(outputs['''depth'''])
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item()) , 2_9.3_0_4)
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item()) , 2.6_6_2)
    @require_torch
    def test_small_model_pt(self):
        self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''')
| 51 | 1 |
"""simple docstring"""
def solution(limit: int = 1_00_00_00) -> int:
    """Project Euler 14: return the starting number below `limit` whose
    Collatz chain is the longest, memoizing chain lengths as we go."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for start in range(2, limit):
        counter = 0
        number = start
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if start not in counters:
            counters[start] = counter
        if counter > pre_counter:
            largest_number = start
            pre_counter = counter
    return largest_number
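# For a toy limit the chains can be checked by hand: below 5 the longest chain
# starts at 3 (3 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1), so
#
#   >>> solution(5)
#   3
#
# and the well-known Project Euler answer for the default limit is 837799.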
if __name__ == "__main__":
print(solution(int(input().strip())))
| 354 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    '''configuration_maskformer''': ['''MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MaskFormerConfig'''],
    '''configuration_maskformer_swin''': ['''MaskFormerSwinConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_maskformer'''] = ['''MaskFormerFeatureExtractor''']
    _import_structure['''image_processing_maskformer'''] = ['''MaskFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_maskformer'''] = [
'''MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MaskFormerForInstanceSegmentation''',
'''MaskFormerModel''',
'''MaskFormerPreTrainedModel''',
]
    _import_structure['''modeling_maskformer_swin'''] = [
'''MaskFormerSwinBackbone''',
'''MaskFormerSwinModel''',
'''MaskFormerSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
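# Illustrative note (added): _LazyModule defers the heavy imports, so e.g.
# `from transformers.models.maskformer import MaskFormerModel` only loads the
# modeling code when the attribute is first accessed.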
| 132 | 0 |
'''simple docstring'''
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )
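# Worked example (added for illustration): 4150 == 4**5 + 1**5 + 5**5 + 0**5, so
# digits_fifth_powers_sum(4150) == 4150 and 4150 is included in the sum.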
if __name__ == "__main__":
print(solution())
| 4 |
'''simple docstring'''
import re
def split_input(str_: str) -> list:
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")
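# Example usage (added for illustration):
#   to_pascal_case("hello world")        -> "HelloWorld"
#   to_snake_case("hello world", False)  -> "hello_world"
#   to_kebab_case("hello world", True)   -> "HELLO-WORLD"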
if __name__ == "__main__":
__import__('doctest').testmod()
| 349 | 0 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(MODEL_TYPES)} )
    data_dir: str = field(
        default=None, metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} )
    max_seq_length: int = field(
        default=128, metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        }, )
    doc_stride: int = field(
        default=128, metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''}, )
    max_query_length: int = field(
        default=64, metadata={
            '''help''': (
                '''The maximum number of tokens for the question. Questions longer than this will '''
                '''be truncated to this length.'''
            )
        }, )
    max_answer_length: int = field(
        default=30, metadata={
            '''help''': (
                '''The maximum length of an answer that can be generated. This is needed because the start '''
                '''and end predictions are not conditioned on one another.'''
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
    version_2_with_negative: bool = field(
        default=False, metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
    n_best_size: int = field(
        default=20, metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
    lang_id: int = field(
        default=0, metadata={
            '''help''': (
                '''language id of input for language-specific xlm models (see'''
                ''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'''
            )
        }, )
    threads: int = field(default=1, metadata={'''help''': '''multiple threads for converting example to features'''} )
class Split(Enum):
    train = '''train'''
    dev = '''dev'''


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("""mode is not a valid split name""")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + """.lock"""
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["""features"""]
                self.dataset = self.old_features.get("""dataset""", None)
                self.examples = self.old_features.get("""examples""", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start)
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        """ future run""")
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)
                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=dataset_format, )
                start = time.time()
                torch.save(
                    {"""features""": self.features, """dataset""": self.dataset, """examples""": self.examples}, cached_features_file, )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]")
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": attention_mask,
            """token_type_ids""": token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask})
        if self.args.version_2_with_negative:
            inputs.update({"""is_impossible""": is_impossible})
        if self.is_language_sensitive:
            inputs.update({"""langs""": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions})
        return inputs
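# Illustrative usage (added, not in the original module; names are assumptions):
#   dataset = SquadDataset(args, tokenizer, mode="train")
#   batch = dataset[0]  # dict with input_ids / attention_mask (/ token_type_ids) tensors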
| 220 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5006, )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
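    # (added note) dummy_extractor is a stub feature extractor: it returns an object with
    # empty pixel_values and a chainable .to(device), enough to satisfy the pipeline API.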
    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", image=init_image, )
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", image=init_image, return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(torch_device)
        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt], generator=generator, num_inference_steps=2, output_type="np", image=init_image, ).images
        assert image.shape == (1, 32, 32, 3)
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))
        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id, safety_checker=None, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator, output_type="np", )
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]
        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id, safety_checker=None, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
| 220 | 1 |
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    """Apply the logistic sigmoid elementwise."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.array) -> np.array:
    """SiLU-style activation using a 1.702-scaled sigmoid."""
    return vector * sigmoid(1.702 * vector)
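# Note (added): sigmoid(0.0) == 0.5, so sigmoid_linear_unit(0.0) == 0.0; the 1.702
# scaling makes this a close elementwise approximation of GELU.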
if __name__ == "__main__":
import doctest
doctest.testmod()
| 324 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize('''This is a test''')
        self.assertListEqual(tokens, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        self.tokenizers_list[0] = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files))
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = 'facebook/mbart-large-en-ro'
    src_text = [
        ' UN Chief Says There Is No Military Solution in Syria',
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    tgt_text = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
        'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang='''en_XX''', tgt_lang='''ro_RO''')
        cls.pad_token_id = 1
        return cls
    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''], 250020)
    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_enro_tokenizer_truncation(self):
        src_text = ['''this is gunna be a long sentence ''' * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR''']), [250026, 250001])
    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors='''pt''')
        batch['''decoder_input_ids'''] = shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id)
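        # (added note) MBart's shift_tokens_right wraps the final language-id token around
        # to position 0, so decoder_input_ids begin with the target language code (RO_CODE).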
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors='''pt''', )
        batch['''decoder_input_ids'''] = shift_tokens_right(batch['''labels'''], self.tokenizer.pad_token_id)
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])
    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors='''pt''')
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors='''pt''')
        labels = targets['''input_ids''']
        batch['''decoder_input_ids'''] = shift_tokens_right(labels, self.tokenizer.pad_token_id)
        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            '''A test''', return_tensors='''pt''', src_lang='''en_XX''', tgt_lang='''ar_AR''')
        self.assertEqual(
            nested_simplify(inputs), {
# A, test, EOS, en_XX
'''input_ids''': [[62, 3034, 2, 250004]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 250001,
} , )
| 324 | 1 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('''backbone.0.body''', '''backbone.conv_encoder.model''')
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
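# (added note) 256 is this checkpoint's hidden size: the fused in_proj parameters stack
# the query, key and value projections along dim 0, hence the three 256-row slices above.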
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if '''detection''' in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image


def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
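# (added note) The mean/std values above are the standard ImageNet normalization statistics.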
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info('''Converting model...''')
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='''cpu''')
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = '''model.'''
    for key in state_dict.copy().keys():
        if not key.startswith('''class_labels_classifier''') and not key.startswith('''bbox_predictor'''):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone='''resnet18''', mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2, )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: '''table''', 1: '''table rotated'''}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: '''table''',
            1: '''table column''',
            2: '''table row''',
            3: '''table column header''',
            4: '''table projected row header''',
            5: '''table spanning cell''',
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    image_processor = DetrImageProcessor(
        format='''coco_detection''', max_size=800 if '''detection''' in checkpoint_url else 1000)
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion
    filename = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png'''
    file_path = hf_hub_download(repo_id='''nielsr/example-pdf''', repo_type='''dataset''', filename=filename)
    image = Image.open(file_path).convert('''RGB''')
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]])
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]])
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print('''Looks ok!''')
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model to HF hub
        logger.info('''Pushing model to the hub...''')
        model_name = (
            '''microsoft/table-transformer-detection'''
            if '''detection''' in checkpoint_url
            else '''microsoft/table-transformer-structure-recognition'''
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
type=str,
choices=[
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
],
help="URL of the Table Transformer checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 98 |
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {'''height''': 20, '''width''': 20}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    dataset = load_dataset('''hf-internal-testing/fixtures_ade20k''', split='''test''')
    image = Image.open(dataset[0]['''file'''])
    map = Image.open(dataset[1]['''file'''])
    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset('''hf-internal-testing/fixtures_ade20k''', split='''test''')
    image1 = Image.open(ds[0]['''file'''])
    map1 = Image.open(ds[1]['''file'''])
    image2 = Image.open(ds[2]['''file'''])
    map2 = Image.open(ds[3]['''file'''])
    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, '''do_resize'''))
        self.assertTrue(hasattr(image_processing, '''size'''))
        self.assertTrue(hasattr(image_processing, '''do_center_crop'''))
        self.assertTrue(hasattr(image_processing, '''center_crop'''))
        self.assertTrue(hasattr(image_processing, '''do_normalize'''))
        self.assertTrue(hasattr(image_processing, '''image_mean'''))
        self.assertTrue(hasattr(image_processing, '''image_std'''))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'''height''': 20, '''width''': 20})
        self.assertEqual(image_processor.crop_size, {'''height''': 18, '''width''': 18})
        self.assertEqual(image_processor.do_reduce_labels, False)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True)
        self.assertEqual(image_processor.size, {'''height''': 42, '''width''': 42})
        self.assertEqual(image_processor.crop_size, {'''height''': 84, '''width''': 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())
        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors='''pt''')
        self.assertEqual(
            encoding['''pixel_values'''].shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
        self.assertEqual(
            encoding['''labels'''].shape, (
                1,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
        self.assertEqual(encoding['''labels'''].dtype, torch.long)
        self.assertTrue(encoding['''labels'''].min().item() >= 0)
        self.assertTrue(encoding['''labels'''].max().item() <= 255)
        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors='''pt''')
        self.assertEqual(
            encoding['''pixel_values'''].shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
        self.assertEqual(
            encoding['''labels'''].shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
        self.assertEqual(encoding['''labels'''].dtype, torch.long)
        self.assertTrue(encoding['''labels'''].min().item() >= 0)
        self.assertTrue(encoding['''labels'''].max().item() <= 255)
        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors='''pt''')
        self.assertEqual(
            encoding['''pixel_values'''].shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
        self.assertEqual(
            encoding['''labels'''].shape, (
                1,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
        self.assertEqual(encoding['''labels'''].dtype, torch.long)
        self.assertTrue(encoding['''labels'''].min().item() >= 0)
        self.assertTrue(encoding['''labels'''].max().item() <= 255)
        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors='''pt''')
        self.assertEqual(
            encoding['''pixel_values'''].shape, (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
        self.assertEqual(
            encoding['''labels'''].shape, (
                2,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
        self.assertEqual(encoding['''labels'''].dtype, torch.long)
        self.assertTrue(encoding['''labels'''].min().item() >= 0)
        self.assertTrue(encoding['''labels'''].max().item() <= 255)
    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors='''pt''')
        self.assertTrue(encoding['''labels'''].min().item() >= 0)
        self.assertTrue(encoding['''labels'''].max().item() <= 150)
        image_processing.do_reduce_labels = True
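        # (added note) with do_reduce_labels, background class 0 is remapped to 255 and all
        # other labels are shifted down by one, which is why 255 becomes the upper bound below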
        encoding = image_processing(image, map, return_tensors='''pt''')
        self.assertTrue(encoding['''labels'''].min().item() >= 0)
        self.assertTrue(encoding['''labels'''].max().item() <= 255)
| 98 | 1 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"""dataset""": """wikipedia""", """config_name""": """20220301.de"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.en"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.it"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""},
{"""dataset""": """snli""", """config_name""": """plain_text"""},
{"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""},
{"""dataset""": """wiki40b""", """config_name""": """en"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""},
{"""dataset""": """natural_questions""", """config_name""": """default"""},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]
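# For example (added for illustration), with_config=True yields parameterized cases like
# {"testcase_name": "wikipedia/20220301.en", "dataset": "wikipedia", "config_name": "20220301.en"}.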
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir, config_name=config_name, hash=dataset_module.hash, )
            dataset_info_url = '/'.join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, '/'),
                    config.DATASET_INFO_FILENAME,
                ])
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp('test_hf_gcp') / 'test_wikipedia_simple'
    dataset_module = dataset_module_factory('wikipedia', cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir, config_name='20220301.frr', hash=dataset_module.hash, )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def A_ ( A__ ) -> int:
a__ : Dict = dataset_module_factory('wikipedia' , cache_dir=A__ )
a__ : Union[str, Any] = import_main_class(dataset_module.module_path , dataset=A__ )
a__ : DatasetBuilder = builder_cls(
cache_dir=A__ , config_name='20220301.frr' , hash=dataset_module.hash , )
a__ : str = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(A__ , A__ )
assert "train" in ds
assert isinstance(ds['train'] , A__ )
assert next(iter(ds['train'] ) )
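

# Illustrative sketch (not part of the tests above): the same streaming path
# through the public `load_dataset` API, using a config from DATASETS_ON_HF_GCP.
#
# from datasets import load_dataset
# ds = load_dataset("wikipedia", "20220301.frr", split="train", streaming=True)
# print(next(iter(ds)))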
| 99 |
class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        """
        Inserts a list of words into the Trie
        :param words: list of string words
        """
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """
        Inserts a word into the Trie
        :param word: word to be inserted
        """
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        """
        Tries to find word in a Trie
        :param word: word to look for
        :return: True if word is found, False otherwise
        """
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        """
        Deletes a word in a Trie
        :param word: word to delete
        """

        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)
def print_words(node: TrieNode, word: str) -> None:
    """
    Prints all the words in a Trie
    :param node: root node of Trie
    :param word: word variable should be empty at start
    """
    if node.is_leaf:
        print(word, end=" ")

    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())
if __name__ == "__main__":
main()
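

# Illustrative usage sketch (beyond the assertions in test_trie above): only
# complete inserted words are marked as leaves, so prefixes are not "found".
#
# root = TrieNode()
# root.insert_many(["band", "bandana"])
# root.find("band")   # True
# root.find("ban")    # False -- "ban" is only a prefix, not an inserted word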
| 276 | 0 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)
    def test_pow_of_3_inference_steps(self):
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample
    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
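

# Standalone sampling sketch (not part of the test suite): the minimal loop the
# tests above exercise, with a zero tensor standing in for a real noise model.
# `PNDMScheduler.step` dispatches to step_prk/step_plms internally.
if __name__ == "__main__":
    demo_scheduler = PNDMScheduler(num_train_timesteps=1000, beta_schedule="linear")
    demo_scheduler.set_timesteps(50)
    demo_sample = torch.randn(1, 3, 32, 32)
    for demo_t in demo_scheduler.timesteps:
        demo_residual = torch.zeros_like(demo_sample)  # stand-in for model(sample, t)
        demo_sample = demo_scheduler.step(demo_residual, demo_t, demo_sample).prev_sample
    print(demo_sample.shape)  # torch.Size([1, 3, 32, 32])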
| 351 |
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299792458
# Symbols
ct, x, y, z = symbols("ct x y z")
def beta(velocity: float) -> float:
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")

    return velocity / c


def gamma(velocity: float) -> float:
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)

    return transformation_matrix(velocity) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
SCREAMING_SNAKE_CASE : Optional[int] = transform(29979245)
print("Example of four vector: ")
print(F"ct' = {four_vector[0]}")
print(F"x' = {four_vector[1]}")
print(F"y' = {four_vector[2]}")
print(F"z' = {four_vector[3]}")
# Substitute symbols with numerical values
SCREAMING_SNAKE_CASE : Tuple = {ct: c, x: 1, y: 1, z: 1}
SCREAMING_SNAKE_CASE : Tuple = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F"\n{numerical_vector}")
| 84 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
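
# Illustrative note (not part of the module): with the lazy structure above,
# importing the config does not pull in the torch-backed modeling file.
#
# from transformers import YolosConfig
# config = YolosConfig()  # modeling_yolos is only imported on first use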
| 124 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
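

# Note on the geometry asserted above (an assumption based on the timm-style
# eval transform this processor mirrors): with crop_pct=0.9 and a 30x30 crop,
# the shortest edge is first resized to int(30 / 0.9) == 33, then center-cropped
# back to 30x30.
#
# print(int(30 / 0.9))  # -> 33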
| 124 | 1 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_lowerCAmelCase = importlib.util.find_spec("s3fs") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
_lowerCAmelCase = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """
    Preprocesses `dataset_path` and removes the remote filesystem prefix (e.g. `s3://`).
    """
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """
    Checks if `fs` is a remote filesystem.
    """
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    """
    Renames the file `src` in `fs` to `dst`.
    """
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """
    Clear reference to the loop and thread, otherwise HTTPFileSystem can hang in a training loop.
    """
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
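

# Illustrative usage of the helpers above:
#
# extract_path_from_uri("s3://my-bucket/datasets/train")  # -> "my-bucket/datasets/train"
# extract_path_from_uri("/local/path")                    # -> "/local/path" (unchanged)
# is_remote_filesystem(fsspec.filesystem("file"))         # -> False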
| 98 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowerCAmelCase = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["GLPNFeatureExtractor"]
_lowerCAmelCase = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
        "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GLPNForDepthEstimation",
        "GLPNLayer",
        "GLPNModel",
        "GLPNPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 98 | 1 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
'''artist''': '''Zac Brown Band''',
'''genres''': '''Country''',
'''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
}
    @require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7_1_6_9, 5_0_7, 9, 7_6, 3_9, 3_1, 4_6, 7_6, 2_7,
7_6, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8, 3_1, 4_4, 7_6, 3_2,
4_4, 4_1, 3_9, 7_6, 2_7, 4_0, 7_6, 2_7, 4_0, 4_6, 3_5, 4_3,
4_7, 3_1, 7_6, 3_8, 2_7, 4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 4_1, 7_6, 4_5, 2_7, 3_5,
3_0, 7_6, 7_1, 2_0, 4_9, 4_1, 7_6, 4_8, 2_7, 4_5, 4_6, 7_6,
2_7, 4_0, 3_0, 7_6, 4_6, 4_4, 4_7, 4_0, 3_7, 3_8, 3_1, 4_5,
4_5, 7_6, 3_8, 3_1, 3_3, 4_5, 7_6, 4_1, 3_2, 7_6, 4_5, 4_6,
4_1, 4_0, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
1_9, 4_6, 2_7, 4_0, 3_0, 7_6, 3_5, 4_0, 7_6, 4_6, 3_4, 3_1,
7_6, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3, 7_6, 6_3, 7_6, 6_3,
7_6, 6_3, 7_6, 1_4, 3_1, 2_7, 4_4, 7_6, 4_6, 3_4, 3_1, 3_9,
6_4, 7_6, 4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_5, 2_7, 4_0,
3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 8,
2_7, 3_8, 3_2, 7_6, 4_5, 4_7, 4_0, 3_7, 7_6, 2_7, 7_6, 4_5,
3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0, 7_6, 4_8, 3_5, 4_5,
2_7, 3_3, 3_1, 7_6, 3_8, 3_5, 3_1, 4_5, 6_4, 7_6, 4_9, 3_4,
4_1, 4_5, 3_1, 7_6, 3_2, 4_4, 4_1, 4_9, 4_0, 6_4, 7_8, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6, 4_9,
4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_6, 3_8, 3_5, 4_2, 6_4,
7_6, 2_7, 4_0, 3_0, 7_6, 4_5, 4_0, 3_1, 3_1, 4_4, 7_6, 4_1,
3_2, 7_6, 2_9, 4_1, 3_8, 3_0, 7_6, 2_9, 4_1, 3_9, 3_9, 2_7,
4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
2_0, 3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_5, 4_6,
4_5, 7_6, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6, 4_1, 4_4, 7_6, 4_9,
3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 4_1, 4_5, 3_1, 7_6, 4_2, 2_7,
4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_6, 4_4, 3_1, 2_7, 3_0, 7_8,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 3_5, 2_9,
3_4, 7_6, 5_1, 3_1, 4_6, 7_6, 4_5, 4_7, 4_4, 4_8, 3_5, 4_8,
3_1, 6_4, 7_6, 4_5, 4_6, 2_7, 3_9, 4_2, 3_1, 3_0, 7_6, 4_1,
4_0, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 3_8, 3_5, 3_2, 3_1,
3_8, 3_1, 4_5, 4_5, 7_6, 4_6, 3_4, 3_5, 4_0, 3_3, 4_5, 6_4,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_0, 3_4, 3_1,
7_6, 3_4, 2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_9,
4_1, 2_9, 3_7, 3_1, 3_0, 7_6, 4_6, 3_4, 3_1, 3_9, 6_4, 7_6,
2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_4, 3_1, 2_7, 4_4,
4_6, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_2, 3_1, 3_0, 6_6, 7_8,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6,
4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_2, 3_1, 3_0, 3_1, 4_5,
4_6, 2_7, 3_8, 6_4, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 4_9,
4_1, 4_4, 3_0, 4_5, 7_6, 2_7, 4_2, 4_2, 3_1, 2_7, 4_4, 6_5,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_3, 5_1, 7_6,
4_0, 2_7, 3_9, 3_1, 7_6, 3_5, 4_5, 7_6, 1_5, 5_2, 5_1, 3_9,
2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_6, 1_1, 3_5, 4_0, 3_3,
7_6, 4_1, 3_2, 7_6, 1_1, 3_5, 4_0, 3_3, 4_5, 6_6, 7_8, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_2, 4_1, 4_1, 3_7, 7_6,
4_1, 4_0, 7_6, 3_9, 5_1, 7_6, 2_3, 4_1, 4_4, 3_7, 4_5, 6_4,
7_6, 5_1, 3_1, 7_6, 1_3, 3_5, 3_3, 3_4, 4_6, 5_1, 6_4, 7_6,
2_7, 4_0, 3_0, 7_6, 3_0, 3_1, 4_5, 4_2, 2_7, 3_5, 4_4, 6_7,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_4, 4_1, 4_6,
3_4, 3_5, 4_0, 3_3, 7_6, 2_8, 3_1, 4_5, 3_5, 3_0, 3_1, 7_6,
4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3, 7_6, 1_8, 4_1, 4_7,
4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_0, 3_1, 2_9, 2_7, 5_1,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_5, 3_2, 7_6,
4_6, 3_4, 2_7, 4_6, 7_6, 2_9, 4_1, 3_8, 4_1, 4_5, 4_5, 2_7,
3_8, 7_6, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4, 7_6, 2_8, 4_1, 4_7,
4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_6, 2_7, 4_0, 3_0, 7_6, 2_8,
2_7, 4_4, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
2_0, 3_4, 3_1, 7_6, 3_8, 4_1, 4_0, 3_1, 7_6, 2_7, 4_0, 3_0,
7_6, 3_8, 3_1, 4_8, 3_1, 3_8, 7_6, 4_5, 2_7, 4_0, 3_0, 4_5,
7_6, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4, 7_6, 3_2, 2_7, 4_4,
7_6, 2_7, 4_9, 2_7, 5_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
7_6, 7_6]]),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]]),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]]),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0]))
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1]))
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2]))
    @require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1, 9, 7_7, 3_9,
3_1, 4_6, 7_7, 2_7, 7_7, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8,
3_1, 4_4, 7_7, 3_2, 4_4, 4_1, 3_9, 7_7, 2_7, 4_0, 7_7, 2_7,
4_0, 4_6, 3_5, 4_3, 4_7, 3_1, 7_7, 3_8, 2_7, 4_0, 3_0, 6_4,
7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_3, 3_4, 4_1,
7_7, 4_5, 2_7, 3_5, 3_0, 7_7, 7_2, 2_0, 4_9, 4_1, 7_7, 4_8,
2_7, 4_5, 4_6, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 4_4, 4_7, 4_0,
3_7, 3_8, 3_1, 4_5, 4_5, 7_7, 3_8, 3_1, 3_3, 4_5, 7_7, 4_1,
3_2, 7_7, 4_5, 4_6, 4_1, 4_0, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 1_9, 4_6, 2_7, 4_0, 3_0, 7_7, 3_5, 4_0,
7_7, 4_6, 3_4, 3_1, 7_7, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3,
7_7, 6_3, 7_7, 6_3, 7_7, 6_3, 7_7, 1_4, 3_1, 2_7, 4_4, 7_7,
4_6, 3_4, 3_1, 3_9, 6_4, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1,
7_7, 4_5, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 8, 2_7, 3_8, 3_2, 7_7, 4_5, 4_7, 4_0, 3_7,
7_7, 2_7, 7_7, 4_5, 3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0,
7_7, 4_8, 3_5, 4_5, 2_7, 3_3, 3_1, 7_7, 3_8, 3_5, 3_1, 4_5,
6_4, 7_7, 4_9, 3_4, 4_1, 4_5, 3_1, 7_7, 3_2, 4_4, 4_1, 4_9,
4_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1,
4_0, 3_0, 7_7, 4_9, 4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_7,
3_8, 3_5, 4_2, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_5, 4_0, 3_1,
3_1, 4_4, 7_7, 4_1, 3_2, 7_7, 2_9, 4_1, 3_8, 3_0, 7_7, 2_9,
4_1, 3_9, 3_9, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 2_0, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 2_7,
4_6, 7_7, 3_5, 4_6, 4_5, 7_7, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6,
4_1, 4_4, 7_7, 4_9, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 4_1, 4_5,
3_1, 7_7, 4_2, 2_7, 4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_7, 4_4,
3_1, 2_7, 3_0, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
2_3, 3_4, 3_5, 2_9, 3_4, 7_7, 5_1, 3_1, 4_6, 7_7, 4_5, 4_7,
4_4, 4_8, 3_5, 4_8, 3_1, 6_4, 7_7, 4_5, 4_6, 2_7, 3_9, 4_2,
3_1, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 4_5, 3_1, 7_7,
3_8, 3_5, 3_2, 3_1, 3_8, 3_1, 4_5, 4_5, 7_7, 4_6, 3_4, 3_5,
4_0, 3_3, 4_5, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 2_0, 3_4, 3_1, 7_7, 3_4, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4,
2_7, 4_6, 7_7, 3_9, 4_1, 2_9, 3_7, 3_1, 3_0, 7_7, 4_6, 3_4,
3_1, 3_9, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7,
3_4, 3_1, 2_7, 4_4, 4_6, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 3_2,
3_1, 3_0, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
1, 4_0, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 7_7, 4_2,
3_1, 3_0, 3_1, 4_5, 4_6, 2_7, 3_8, 6_4, 7_7, 4_6, 3_4, 3_1,
4_5, 3_1, 7_7, 4_9, 4_1, 4_4, 3_0, 4_5, 7_7, 2_7, 4_2, 4_2,
3_1, 2_7, 4_4, 6_5, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_3, 5_1, 7_7, 4_0, 2_7, 3_9, 3_1, 7_7, 3_5, 4_5, 7_7,
1_5, 5_2, 5_1, 3_9, 2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_7,
1_1, 3_5, 4_0, 3_3, 7_7, 4_1, 3_2, 7_7, 1_1, 3_5, 4_0, 3_3,
4_5, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_2,
4_1, 4_1, 3_7, 7_7, 4_1, 4_0, 7_7, 3_9, 5_1, 7_7, 2_3, 4_1,
4_4, 3_7, 4_5, 6_4, 7_7, 5_1, 3_1, 7_7, 1_3, 3_5, 3_3, 3_4,
4_6, 5_1, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 3_0, 3_1, 4_5, 4_2,
2_7, 3_5, 4_4, 6_7, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_4, 4_1, 4_6, 3_4, 3_5, 4_0, 3_3, 7_7, 2_8, 3_1, 4_5,
3_5, 3_0, 3_1, 7_7, 4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3,
7_7, 1_8, 4_1, 4_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7, 3_0,
3_1, 2_9, 2_7, 5_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_5, 3_2, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 2_9, 4_1, 3_8,
4_1, 4_5, 4_5, 2_7, 3_8, 7_7, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4,
7_7, 2_8, 4_1, 4_7, 4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_7, 2_7,
4_0, 3_0, 7_7, 2_8, 2_7, 4_4, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 2_0, 3_4, 3_1, 7_7, 3_8, 4_1, 4_0, 3_1,
7_7, 2_7, 4_0, 3_0, 7_7, 3_8, 3_1, 4_8, 3_1, 3_8, 7_7, 4_5,
2_7, 4_0, 3_0, 4_5, 7_7, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4,
7_7, 3_2, 2_7, 4_4, 7_7, 2_7, 4_9, 2_7, 5_1, 7_9, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 7_7, 7_7]]),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]]),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]]),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0]))
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1]))
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2]))
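

# Minimal usage sketch (mirrors the tests above; assumes Hub access):
#
# tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
# tokens = tokenizer(artist="Zac Brown Band", genres="Country", lyrics="...")["input_ids"]
# # `tokens` is a list of three tensors, one per prior level, as asserted above.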
| 103 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string into a list of characters."""
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
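

# Illustrative round trip (assumes a local character-level vocab.json exists):
#
# tok = MgpstrTokenizer(vocab_file="vocab.json")
# tok._tokenize("hello")  # -> ["h", "e", "l", "l", "o"]: one token per character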
| 294 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "deit.embeddings.cls_token"),
            ("dist_token", "deit.embeddings.distillation_token"),
            ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "deit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )

        # if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("norm.weight", "deit.layernorm.weight"),
                ("norm.bias", "deit.layernorm.bias"),
                ("head.weight", "cls_classifier.weight"),
                ("head.bias", "cls_classifier.bias"),
                ("head_dist.weight", "distillation_classifier.weight"),
                ("head_dist.bias", "distillation_classifier.bias"),
            ]
        )

    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
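
    # Example invocation (hypothetical output path):
    #
    #   python convert_deit_timm_to_pytorch.py \
    #       --deit_name vit_deit_base_distilled_patch16_224 \
    #       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224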
| 143 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
        )

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }

        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }

        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )

    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
[
[
[-0.12053341, -1.0264901, 0.29221946],
[-1.5133783, 0.197433, 0.15190607],
[-5.0135403, -3.900256, -0.84038764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 )
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest ( unittest.TestCase ):
    tolerance = 1e-4
    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)
        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)
    def test_positional_emb_weights(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ] )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])
        weights = emb1.weight[:3, :5]
        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest ( unittest.TestCase ):
    tolerance = 1e-4
    def test_apply_rotary_position_embeddings(self):
        # 2 batches, 12 heads, 16 positions, 64 features per head
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]
        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer)
        expected_query = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ] )
        expected_key = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8], expected_query, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], expected_key, atol=self.tolerance)
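# --- Editorial sketch (not part of the original test file) ---
# The rotary trick checked above rotates each (even, odd) feature pair of the
# query/key tensors by a position-dependent angle. A minimal NumPy equivalent,
# assuming `sinusoidal_pos` holds the [sin | cos] halves produced by
# TFRoFormerSinusoidalPositionalEmbedding:
import numpy as np

def rotary_sketch(x, sinusoidal_pos):
    sin, cos = np.split(sinusoidal_pos, 2, axis=-1)
    sin_pos = np.repeat(sin, 2, axis=-1)  # [s0, s0, s1, s1, ...]
    cos_pos = np.repeat(cos, 2, axis=-1)
    # pair-wise rotation: (x0, x1) -> (x0*cos - x1*sin, x1*cos + x0*sin)
    rotated = np.stack([-x[..., 1::2], x[..., ::2]], axis=-1).reshape(x.shape)
    return x * cos_pos + rotated * sin_pos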
| 143 | 1 |
'''simple docstring'''
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
UpperCAmelCase = logging.get_logger(__name__)
class lowerCAmelCase :
lowerCAmelCase_ = 42
lowerCAmelCase_ = None
@staticmethod
def snake_case ( ):
"""simple docstring"""
raise NotImplementedError
def snake_case ( self : str , __lowercase : Optional[int] , __lowercase : int , __lowercase : str , **__lowercase : Dict ):
"""simple docstring"""
raise NotImplementedError
def snake_case ( self : str , __lowercase : Optional[int] ):
"""simple docstring"""
raise NotImplementedError
def snake_case ( self : Dict ):
"""simple docstring"""
if not self.is_available():
raise RuntimeError(
f'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' )
@classmethod
def snake_case ( cls : Any ):
"""simple docstring"""
return f'''`pip install {cls.pip_package or cls.name}`'''
class lowerCAmelCase ( A ):
lowerCAmelCase_ = "optuna"
@staticmethod
def snake_case ( ):
"""simple docstring"""
return is_optuna_available()
def snake_case ( self : Tuple , __lowercase : List[str] , __lowercase : int , __lowercase : str , **__lowercase : int ):
"""simple docstring"""
return run_hp_search_optuna(__lowercase , __lowercase , __lowercase , **__lowercase )
def snake_case ( self : Dict , __lowercase : List[str] ):
"""simple docstring"""
return default_hp_space_optuna(__lowercase )
class lowerCAmelCase ( A ):
lowerCAmelCase_ = "ray"
lowerCAmelCase_ = "'ray[tune]'"
@staticmethod
def snake_case ( ):
"""simple docstring"""
return is_ray_available()
def snake_case ( self : Union[str, Any] , __lowercase : List[Any] , __lowercase : int , __lowercase : str , **__lowercase : List[Any] ):
"""simple docstring"""
return run_hp_search_ray(__lowercase , __lowercase , __lowercase , **__lowercase )
def snake_case ( self : Optional[Any] , __lowercase : Optional[Any] ):
"""simple docstring"""
return default_hp_space_ray(__lowercase )
class lowerCAmelCase ( A ):
lowerCAmelCase_ = "sigopt"
@staticmethod
def snake_case ( ):
"""simple docstring"""
return is_sigopt_available()
def snake_case ( self : List[str] , __lowercase : Optional[int] , __lowercase : int , __lowercase : str , **__lowercase : Dict ):
"""simple docstring"""
return run_hp_search_sigopt(__lowercase , __lowercase , __lowercase , **__lowercase )
def snake_case ( self : Tuple , __lowercase : int ):
"""simple docstring"""
return default_hp_space_sigopt(__lowercase )
class lowerCAmelCase ( A ):
lowerCAmelCase_ = "wandb"
@staticmethod
def snake_case ( ):
"""simple docstring"""
return is_wandb_available()
def snake_case ( self : Optional[Any] , __lowercase : Tuple , __lowercase : int , __lowercase : str , **__lowercase : List[Any] ):
"""simple docstring"""
return run_hp_search_wandb(__lowercase , __lowercase , __lowercase , **__lowercase )
def snake_case ( self : int , __lowercase : int ):
"""simple docstring"""
return default_hp_space_wandb(__lowercase )
UpperCAmelCase = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def __UpperCamelCase ( ):
'''simple docstring'''
__lowercase =[backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(lowercase__ ) > 0:
__lowercase =available_backends[0].name
if len(lowercase__ ) > 1:
logger.info(
F'''{len(lowercase__ )} hyperparameter search backends available. Using {name} as the default.''' )
return name
raise RuntimeError(
'No hyperparameter search backend available.\n'
+ '\n'.join(
F''' - To install {backend.name} run {backend.pip_install()}'''
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
| 141 |
'''simple docstring'''
def solution(limit: int = 28123) -> int:
    '''simple docstring'''
    # sieve of proper-divisor sums (1 divides every n > 1)
    sum_divs = [1] * (limit + 1)
    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)
        if not any((n - a in abundants) for a in abundants):
            res += n
    return res
if __name__ == "__main__":
print(solution())
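# Editorial note (not in the original file): with the default limit of 28123 this
# prints 4179871 — the sum of all positive integers that cannot be written as the
# sum of two abundant numbers (Project Euler problem 23).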
| 141 | 1 |
"""simple docstring"""
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location='cpu')
    state_dict = chkpt['model']
    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict['transformer.' + k] = v
    config = chkpt['params']
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}
    vocab = chkpt['dico_word2id']
    vocab = {s + '</w>' if s.find('@@') == -1 and i > 13 else s.replace('@@', ''): i for s, i in vocab.items()}
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['vocab_file']
    print(f'Save PyTorch model to {pytorch_weights_dump_path}')
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)
    print(f'Save configuration file to {pytorch_config_dump_path}')
    with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
        f.write(json.dumps(config, indent=2) + '\n')
    print(f'Save vocab file to {pytorch_vocab_dump_path}')
    with open(pytorch_vocab_dump_path, 'w', encoding='utf-8') as f:
        f.write(json.dumps(vocab, indent=2) + '\n')
if __name__ == "__main__":
A_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
A_ : int = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 316 |
"""simple docstring"""
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.15},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
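# Editorial sketch (not in the original script): how the table above is consulted —
# a per-model length_penalty with a 1.0 fallback, mirroring the logic inside the
# conversion function further down.
def _best_length_penalty(model_dir):
    return best_score_hparams.get(model_dir, {}).get("length_penalty", 1.0)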
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    da = dict((re.sub(r'@@$', '', k), v) if k.endswith('@@') else (re.sub(r'$', '</w>', k), v) for k, v in d.items())
    keep_keys = '<s> <pad> </s> <unk>'.split()
    # restore the special tokens
    for k in keep_keys:
        del da[f'{k}</w>']
        da[k] = d[k]  # restore
    return da
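# Hypothetical usage note (editorial, not in the original script):
# rewrite_dict_keys({'le@@': 5, 'tt@@': 6, 'er': 7, '<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3})
#   -> {'le': 5, 'tt': 6, 'er</w>': 7, '<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}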
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f'Writing results to {pytorch_dump_folder_path}')
    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)
    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
    data_name_or_path = '.'
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f'using checkpoint {checkpoint_file}')
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs)
    args = vars(chkpt['args']['model'])
    src_lang = args['source_lang']
    tgt_lang = args['target_lang']
    data_root = dirname(fsmt_folder_path)
    model_dir = basename(fsmt_folder_path)
    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f'dict.{src_lang}.txt')
    tgt_dict_file = os.path.join(fsmt_folder_path, f'dict.{tgt_lang}.txt')
    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, 'vocab-src.json')
    print(f'Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records')
    with open(src_vocab_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))
    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break
    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, 'vocab-tgt.json')
    print(f'Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records')
    with open(tgt_vocab_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))
    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES['merges_file'])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding='utf-8') as fin:
        merges = fin.read()
    merges = re.sub(r' \d+$', '', merges, 0, re.M)  # remove frequency number
    print(f'Generating {merges_file}')
    with open(merges_file, 'w', encoding='utf-8') as fout:
        fout.write(merges)
    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, 'config.json')
    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support bpe={args['tokenizer']}"
    model_conf = {
        'architectures': ['FSMTForConditionalGeneration'],
        'model_type': 'fsmt',
        'activation_dropout': args['activation_dropout'],
        'activation_function': 'relu',
        'attention_dropout': args['attention_dropout'],
        'd_model': args['decoder_embed_dim'],
        'dropout': args['dropout'],
        'init_std': 0.02,
        'max_position_embeddings': args['max_source_positions'],
        'num_hidden_layers': args['encoder_layers'],
        'src_vocab_size': src_vocab_size,
        'tgt_vocab_size': tgt_vocab_size,
        'langs': [src_lang, tgt_lang],
        'encoder_attention_heads': args['encoder_attention_heads'],
        'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
        'encoder_layerdrop': args['encoder_layerdrop'],
        'encoder_layers': args['encoder_layers'],
        'decoder_attention_heads': args['decoder_attention_heads'],
        'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
        'decoder_layerdrop': args['decoder_layerdrop'],
        'decoder_layers': args['decoder_layers'],
        'bos_token_id': 0,
        'pad_token_id': 1,
        'eos_token_id': 2,
        'is_encoder_decoder': True,
        'scale_embedding': not args['no_scale_embedding'],
        'tie_word_embeddings': args['share_all_embeddings'],
    }
    # good hparam defaults to start with
    model_conf['num_beams'] = 5
    model_conf['early_stopping'] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf['length_penalty'] = best_score_hparams[model_dir]['length_penalty']
    else:
        model_conf['length_penalty'] = 1.0
    print(f'Generating {fsmt_model_config_file}')
    with open(fsmt_model_config_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))
    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        'langs': [src_lang, tgt_lang],
        'model_max_length': 1024,
        'do_lower_case': do_lower_case,
    }
    print(f'Generating {fsmt_tokenizer_config_file}')
    with open(fsmt_tokenizer_config_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))
    # model
    model = chkpt['models'][0]
    model_state_dict = model.state_dict()
    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items())
    # remove unneeded keys
    ignore_keys = [
        'model.model',
        'model.encoder.version',
        'model.decoder.version',
        'model.encoder_embed_tokens.weight',
        'model.decoder_embed_tokens.weight',
        'model.encoder.embed_positions._float_tensor',
        'model.decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)
    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)
    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)
    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f'Generating {pytorch_weights_dump_path}')
    torch.save(model_state_dict, pytorch_weights_dump_path)
    print('Conversion is done!')
    print('\nLast step is to upload the files to s3')
    print(f'cd {data_root}')
    print(f'transformers-cli upload {model_dir}')
if __name__ == "__main__":
A_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
A_ : Dict = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 316 | 1 |
"""simple docstring"""
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowercase__ : int = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest ( TokenizerTesterMixin, unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def SCREAMING_SNAKE_CASE__ ( self : Any ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase_ : Optional[int] = PegasusTokenizer(_a )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
return PegasusTokenizer.from_pretrained('google/pegasus-large' )
def SCREAMING_SNAKE_CASE__ ( self : str , **SCREAMING_SNAKE_CASE_ : str ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_a )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict ):
return ("This is a test", "This is a test")
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowerCAmelCase_ : Any = '</s>'
lowerCAmelCase_ : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
lowerCAmelCase_ : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '</s>' )
self.assertEqual(vocab_keys[-1] , 'v' )
self.assertEqual(len(_a ) , 1_1_0_3 )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_1_0_3 )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
lowerCAmelCase_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase_ : Tuple = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase_ : Union[str, Any] = (
'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
' </s> <pad> <pad> <pad>'
)
lowerCAmelCase_ : Any = rust_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
lowerCAmelCase_ : List[str] = py_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
self.assertListEqual(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self : str ):
lowerCAmelCase_ : Union[str, Any] = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
lowerCAmelCase_ : List[Any] = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
lowerCAmelCase_ : List[Any] = [2, 4_1_3, 6_1_5, 1_1_4, 3, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
lowerCAmelCase_ : Union[str, Any] = tokenizer([raw_input_str] , return_tensors=_a ).input_ids[0]
self.assertListEqual(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
lowerCAmelCase_ : str = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6_1_0_3
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_0_3
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_0_5
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_0_2_4
lowerCAmelCase_ : Any = 'To ensure a smooth flow of bank resolutions.'
lowerCAmelCase_ : int = [4_1_3, 6_1_5, 1_1_4, 2_2_9_1, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
lowerCAmelCase_ : Union[str, Any] = tokenizer([raw_input_str] , return_tensors=_a ).input_ids[0]
self.assertListEqual(_a , _a )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def SCREAMING_SNAKE_CASE__ ( self : str ):
lowerCAmelCase_ : Tuple = ['This is going to be way too long.' * 1_5_0, 'short example']
lowerCAmelCase_ : List[str] = ['not super long but more than 5 tokens', 'tiny']
lowerCAmelCase_ : str = self._large_tokenizer(_a , padding=_a , truncation=_a , return_tensors='pt' )
lowerCAmelCase_ : int = self._large_tokenizer(
text_target=_a , max_length=5 , padding=_a , truncation=_a , return_tensors='pt' )
assert batch.input_ids.shape == (2, 1_0_2_4)
assert batch.attention_mask.shape == (2, 1_0_2_4)
assert targets["input_ids"].shape == (2, 5)
assert len(_a ) == 2 # input_ids, attention_mask.
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowerCAmelCase_ : Tuple = {'input_ids': [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest ( TokenizerTesterMixin, unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase_ : Tuple = PegasusTokenizer(_a , offset=0 , mask_token_sent=_a , mask_token='[MASK]' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , **SCREAMING_SNAKE_CASE_ : Optional[Any] ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_a )
def SCREAMING_SNAKE_CASE__ ( self : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
return ("This is a test", "This is a test")
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowerCAmelCase_ : str = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase_ : List[str] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCAmelCase_ : Optional[int] = (
'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
' <pad> <pad> <pad>'
)
lowerCAmelCase_ : int = rust_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
lowerCAmelCase_ : List[Any] = py_tokenizer([raw_input_str] , return_tensors=_a , add_special_tokens=_a ).input_ids[0]
self.assertListEqual(_a , _a )
@require_torch
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowerCAmelCase_ : Tuple = ['This is going to be way too long.' * 1_0_0_0, 'short example']
lowerCAmelCase_ : Optional[Any] = ['not super long but more than 5 tokens', 'tiny']
lowerCAmelCase_ : int = self._large_tokenizer(_a , padding=_a , truncation=_a , return_tensors='pt' )
lowerCAmelCase_ : str = self._large_tokenizer(
text_target=_a , max_length=5 , padding=_a , truncation=_a , return_tensors='pt' )
assert batch.input_ids.shape == (2, 4_0_9_6)
assert batch.attention_mask.shape == (2, 4_0_9_6)
assert targets["input_ids"].shape == (2, 5)
assert len(_a ) == 2 # input_ids, attention_mask.
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
lowerCAmelCase_ : Optional[int] = (
'This is an example string that is used to test the original TF implementation against the HF'
' implementation'
)
lowerCAmelCase_ : Optional[Any] = self._large_tokenizer(_a ).input_ids
self.assertListEqual(
_a , [1_8_2, 1_1_7, 1_4_2, 5_8_7, 4_2_1_1, 1_2_0, 1_1_7, 2_6_3, 1_1_2, 8_0_4, 1_0_9, 8_5_6, 2_5_0_1_6, 3_1_3_7, 4_6_4, 1_0_9, 2_6_9_5_5, 3_1_3_7, 1] , )
| 224 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
__lowerCAmelCase = TypeVar("""T""")
class Node(Generic[T] ):
    """simple docstring"""
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None
    def __str__(self):
        return f"{self.data}"
class Stack(Generic[T] ):
    """simple docstring"""
    def __init__(self):
        self.top: Node[T] | None = None
    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next
    def __str__(self):
        return "->".join([str(item) for item in self])
    def __len__(self):
        return len(tuple(iter(self)))
    def is_empty(self) -> bool:
        return self.top is None
    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node
    def pop(self) -> T:
        if self.is_empty():
            raise IndexError('pop from empty stack')
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data
    def peek(self) -> T:
        if self.is_empty():
            raise IndexError('peek from empty stack')
        assert self.top is not None
        return self.top.data
    def clear(self) -> None:
        self.top = None
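# Editorial usage sketch (not in the original module):
#   stack = Stack[int]()
#   stack.push(1); stack.push(2)
#   stack.pop()  -> 2
#   stack.peek() -> 1
#   str(stack)   -> '1'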
if __name__ == "__main__":
from doctest import testmod
testmod()
| 271 | 0 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
os.environ['XLA_PYTHON_CLIENT_ALLOCATOR'] = 'platform'
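# Editorial note (not in the original file): "platform" is the slowest but most
# conservative XLA allocator. Alternatives the JAX docs describe for the same
# purpose are os.environ["XLA_PYTHON_CLIENT_PREALLOCATE"] = "false" (grow on
# demand) or "XLA_PYTHON_CLIENT_MEM_FRACTION" to cap the preallocated share.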
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class FlaxBlenderbotModelTester :
'''simple docstring'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict['input_ids'])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype='i4')
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype='i4')
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids, )
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f'Max diff is {diff}')
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict['input_ids'])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ], axis=-1, )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype='i4')
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids, )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f'Max diff is {diff}')
@require_flax
class A_ ( unittest.TestCase ):
'''simple docstring'''
    vocab_size = 99
    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ], dtype=np.int64, )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs['logits'].shape, expected_shape)
    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs['logits'].shape, expected_shape)
    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest ( FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
    '''simple docstring'''
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class_name in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class_name, config, inputs_dict)
    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class_name in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class_name, config, inputs_dict)
def _snake_case ( self: Optional[Any] ):
__lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowerCamelCase : List[str] = self._prepare_for_class(a , a )
__lowerCamelCase : str = model_class(a )
@jax.jit
def encode_jitted(a: Any , a: Dict=None , **a: Union[str, Any] ):
return model.encode(input_ids=a , attention_mask=a )
with self.subTest('JIT Enabled' ):
__lowerCamelCase : Tuple = encode_jitted(**a ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
__lowerCamelCase : int = encode_jitted(**a ).to_tuple()
self.assertEqual(len(a ) , len(a ) )
for jitted_output, output in zip(a , a ):
self.assertEqual(jitted_output.shape , output.shape )
def _snake_case ( self: Union[str, Any] ):
__lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowerCamelCase : Tuple = model_class(a )
__lowerCamelCase : Tuple = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
__lowerCamelCase : Tuple = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(a: List[Any] , a: List[str] , a: Optional[Any] ):
return model.decode(
decoder_input_ids=a , decoder_attention_mask=a , encoder_outputs=a , )
with self.subTest('JIT Enabled' ):
__lowerCamelCase : Union[str, Any] = decode_jitted(**a ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
__lowerCamelCase : List[Any] = decode_jitted(**a ).to_tuple()
self.assertEqual(len(a ) , len(a ) )
for jitted_output, output in zip(a , a ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _snake_case ( self: int ):
for model_class_name in self.all_model_classes:
__lowerCamelCase : List[str] = model_class_name.from_pretrained('facebook/blenderbot-400M-distill' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
__lowerCamelCase : str = np.ones((1, 1) ) * model.config.eos_token_id
__lowerCamelCase : Optional[Any] = model(a )
self.assertIsNotNone(a )
@unittest.skipUnless(jax_device != 'cpu' , '3B test too slow on CPU.' )
@slow
def _snake_case ( self: Any ):
__lowerCamelCase : Tuple = {'num_beams': 1, 'early_stopping': True, 'min_length': 15, 'max_length': 25}
__lowerCamelCase : Tuple = {'skip_special_tokens': True, 'clean_up_tokenization_spaces': True}
__lowerCamelCase : List[Any] = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-3B' , from_pt=a )
__lowerCamelCase : Optional[int] = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B' )
__lowerCamelCase : Tuple = ['Sam']
__lowerCamelCase : Union[str, Any] = tokenizer(a , return_tensors='jax' )
__lowerCamelCase : Optional[Any] = model.generate(**a , **a )
__lowerCamelCase : Tuple = 'Sam is a great name. It means "sun" in Gaelic.'
__lowerCamelCase : Optional[Any] = tokenizer.batch_decode(a , **a )
assert generated_txt[0].strip() == tgt_text
| 361 |
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint( checkpoint_path , metadata_path , entity_vocab_path , pytorch_dump_folder_path , model_size ):
# Load configuration defined in the metadata file
with open(SCREAMING_SNAKE_CASE__ ) as metadata_file:
__lowerCamelCase : List[str] = json.load(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : int = LukeConfig(use_entity_aware_attention=SCREAMING_SNAKE_CASE__ , **metadata['model_config'] )
# Load in the weights from the checkpoint_path
__lowerCamelCase : Union[str, Any] = torch.load(SCREAMING_SNAKE_CASE__ , map_location='cpu' )
# Load the entity vocab file
__lowerCamelCase : List[Any] = load_entity_vocab(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : Optional[int] = RobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
# Add special tokens to the token vocabulary for downstream tasks
__lowerCamelCase : str = AddedToken('<ent>' , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : Dict = AddedToken('<ent2>' , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ )
tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , LukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : str = LukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
# Initialize the embeddings of the special tokens
__lowerCamelCase : Union[str, Any] = state_dict['embeddings.word_embeddings.weight']
__lowerCamelCase : Tuple = word_emb[tokenizer.convert_tokens_to_ids(['@'] )[0]].unsqueeze(0 )
__lowerCamelCase : Any = word_emb[tokenizer.convert_tokens_to_ids(['#'] )[0]].unsqueeze(0 )
__lowerCamelCase : List[str] = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
__lowerCamelCase : Optional[int] = f'encoder.layer.{layer_index}.attention.self.'
__lowerCamelCase : Dict = state_dict[prefix + matrix_name]
__lowerCamelCase : List[Any] = state_dict[prefix + matrix_name]
__lowerCamelCase : Union[str, Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
__lowerCamelCase : Optional[int] = state_dict['entity_embeddings.entity_embeddings.weight']
__lowerCamelCase : Union[str, Any] = entity_emb[entity_vocab['[MASK]']]
__lowerCamelCase : Optional[Any] = LukeModel(config=SCREAMING_SNAKE_CASE__ ).eval()
    missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
    if not (len(missing_keys ) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f'Missing keys {", ".join(missing_keys )}. Expected only missing embeddings.position_ids' )
if not (all(key.startswith('entity_predictions' ) or key.startswith('lm_head' ) for key in unexpected_keys )):
raise ValueError(
'Unexpected keys'
f' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' )
# Check outputs
__lowerCamelCase : Optional[Any] = LukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ , task='entity_classification' )
__lowerCamelCase : Dict = (
'Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'
' new world number one avoid a humiliating second- round exit at Wimbledon .'
)
__lowerCamelCase : Union[str, Any] = (39, 42)
__lowerCamelCase : Optional[Any] = tokenizer(SCREAMING_SNAKE_CASE__ , entity_spans=[span] , add_prefix_space=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
__lowerCamelCase : List[str] = model(**SCREAMING_SNAKE_CASE__ )
# Verify word hidden states
if model_size == "large":
__lowerCamelCase : Dict = torch.Size((1, 42, 1_024) )
__lowerCamelCase : int = torch.tensor(
[[0.0_133, 0.0_865, 0.0_095], [0.3_093, -0.2_576, -0.7_418], [-0.1_720, -0.2_117, -0.2_869]] )
else: # base
__lowerCamelCase : Union[str, Any] = torch.Size((1, 42, 768) )
__lowerCamelCase : Tuple = torch.tensor([[0.0_037, 0.1_368, -0.0_091], [0.1_099, 0.3_329, -0.1_095], [0.0_765, 0.5_335, 0.1_179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
__lowerCamelCase : Union[str, Any] = torch.Size((1, 1, 1_024) )
__lowerCamelCase : Dict = torch.tensor([[0.0_466, -0.0_106, -0.0_179]] )
else: # base
__lowerCamelCase : int = torch.Size((1, 1, 768) )
__lowerCamelCase : Dict = torch.tensor([[0.1_457, 0.1_044, 0.0_174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
f' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('Saving PyTorch model to {}'.format(SCREAMING_SNAKE_CASE__ ) )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
def load_entity_vocab( entity_vocab_path ):
    entity_vocab = {}
    with open(entity_vocab_path , 'r' , encoding='utf-8' ) as f:
        for index, line in enumerate(f ):
            title, _ = line.rstrip().split('\t' )
            entity_vocab[title] = index
    return entity_vocab
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
lowercase_ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 194 | 0 |
"""simple docstring"""
import math
import sys
def lowerCAmelCase__ ( number ):
    '''simple docstring'''
    if number != int(number ):
        raise ValueError("""the value of input must be a natural number""" )
    if number < 0:
        raise ValueError("""the value of input must not be a negative number""" )
    if number == 0:
        return 1
    # answers[i] holds the minimum number of perfect squares summing to i
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1 , number + 1 ):
        answer = sys.maxsize
        root = int(math.sqrt(i ) )
        for j in range(1 , root + 1 ):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(current_answer , answer )
        answers[i] = answer
    return answers[number]
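# Editorial note (not in the original file): e.g. for 13 the function returns 2,
# since 13 = 4 + 9; by Lagrange's four-square theorem the result is at most 4.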
if __name__ == "__main__":
import doctest
doctest.testmod()
| 294 |
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
_snake_case = [8, 5, 9, 7]
_snake_case = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
_snake_case = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class UpperCamelCase :
    def __init__( self , claim_vector : list[int] , allocated_resources_table : list[list[int]] , maximum_claim_table : list[list[int]] , ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table
    def __processes_resource_summation( self ) -> list[int]:
        # column-wise sum of currently allocated resources
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table )
            for i in range(len(self.__allocated_resources_table[0] ) )
        ]
    def __available_resources( self ) -> list[int]:
        return np.array(self.__claim_vector ) - np.array(
            self.__processes_resource_summation() )
    def __need( self ) -> list[list[int]]:
        return [
            list(np.array(self.__maximum_claim_table[i] ) - np.array(allocated_resource ) )
            for i, allocated_resource in enumerate(self.__allocated_resources_table )
        ]
    def __need_index_manager( self ) -> dict[int, list[int]]:
        return {self.__need().index(need ): need for need in self.__need()}
    def main( self , **kwargs ) -> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n" )
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need ):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing." )
                    # remove the process run from stack
                    need_list.remove(each_need )
                    # update available/freed resources stack
                    available_resources = np.array(available_resources ) + np.array(
                        alloc_resources_table[process_number] )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x ) for x in available_resources] ) )
                    break
            if safe:
                print("The process is in a safe state.\n" )
            else:
                print("System in unsafe state. Aborting...\n" )
                break
    def __pretty_data( self ):
        print(" " * 9 + "Allocated Resource Table" )
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item ) + 1}"
                + " ".join(f"{it:>8}" for it in item )
                + "\n" )
        print(" " * 9 + "System Resource Table" )
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item ) + 1}"
                + " ".join(f"{it:>8}" for it in item )
                + "\n" )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x ) for x in self.__claim_vector ) )
        print(
            "Initial Available Resources: "
            + " ".join(str(x ) for x in self.__available_resources() ) )
        time.sleep(1 )
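# Editorial sketch (not in the original module): the safety check performed by
# main() above, reduced to a standalone helper over plain lists of ints.
def _is_safe_state(available, needs, allocations):
    available = list(available)
    pending = list(range(len(needs)))
    while pending:
        # find a process whose remaining need fits in the available vector
        runnable = next(
            (p for p in pending if all(n <= a for n, a in zip(needs[p], available))),
            None,
        )
        if runnable is None:
            return False  # no process can finish: unsafe state
        # let it run to completion and release its allocation
        for i, alloc in enumerate(allocations[runnable]):
            available[i] += alloc
        pending.remove(runnable)
    return True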
if __name__ == "__main__":
import doctest
doctest.testmod()
| 294 | 1 |
def lowercase( texta , textb ) -> str:
    '''simple docstring'''
    if not (isinstance(texta , str ) and isinstance(textb , str )):
        raise ValueError("""longest_common_substring() takes two strings for inputs""" )
    texta_length = len(texta )
    textb_length = len(textb )
    # dp[i][j] = length of the common suffix of texta[:i] and textb[:j]
    dp = [[0] * (textb_length + 1) for _ in range(texta_length + 1 )]
    ans_index = 0
    ans_length = 0
    for i in range(1 , texta_length + 1 ):
        for j in range(1 , textb_length + 1 ):
            if texta[i - 1] == textb[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]
    return texta[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
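    # Hedged sanity check (assumed example, not in the original file): "abcd" is
    # the longest substring shared by the two inputs below.
    assert lowercase("""xyzabcd""" , """abcdxyz""" ) == """abcd"""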
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
_SCREAMING_SNAKE_CASE = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
_SCREAMING_SNAKE_CASE = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F'''{len(upper_files)} files contain uppercase characters:''')
print("""\n""".join(upper_files) + """\n""")
_SCREAMING_SNAKE_CASE = [file for file in filepaths if """ """ in file]
if space_files:
print(F'''{len(space_files)} files contain space characters:''')
print("""\n""".join(space_files) + """\n""")
_SCREAMING_SNAKE_CASE = [file for file in filepaths if """-""" in file]
if hyphen_files:
print(F'''{len(hyphen_files)} files contain hyphen characters:''')
print("""\n""".join(hyphen_files) + """\n""")
_SCREAMING_SNAKE_CASE = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F'''{len(nodir_files)} files are not in a directory:''')
print("""\n""".join(nodir_files) + """\n""")
_SCREAMING_SNAKE_CASE = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
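# Illustrative (hypothetical) paths this linter would flag:
#   "DataStructures/queue.py"  -> contains uppercase characters
#   "graphs/bellman ford.py"   -> contains a space
#   "a-star.py"                -> contains a hyphen and sits in no directory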
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class _UpperCAmelCase( lowerCamelCase ):
def __init__( self , __a , __a , __a , __a , ) -> Optional[int]:
'''simple docstring'''
super().__init__()
_UpperCamelCase = value_function
_UpperCamelCase = unet
_UpperCamelCase = scheduler
_UpperCamelCase = env
_UpperCamelCase = env.get_dataset()
_UpperCamelCase = {}
for key in self.data.keys():
try:
_UpperCamelCase = self.data[key].mean()
except: # noqa: E722
pass
_UpperCamelCase = {}
for key in self.data.keys():
try:
_UpperCamelCase = self.data[key].std()
except: # noqa: E722
pass
_UpperCamelCase = env.observation_space.shape[0]
_UpperCamelCase = env.action_space.shape[0]
def UpperCAmelCase ( self , __a , __a) -> int:
'''simple docstring'''
return (x_in - self.means[key]) / self.stds[key]
def UpperCAmelCase ( self , __a , __a) -> List[str]:
'''simple docstring'''
return x_in * self.stds[key] + self.means[key]
def UpperCAmelCase ( self , __a) -> Union[str, Any]:
'''simple docstring'''
if type(__a) is dict:
return {k: self.to_torch(__a) for k, v in x_in.items()}
elif torch.is_tensor(__a):
return x_in.to(self.unet.device)
return torch.tensor(__a , device=self.unet.device)
def UpperCAmelCase ( self , __a , __a , __a) -> str:
'''simple docstring'''
for key, val in cond.items():
_UpperCamelCase = val.clone()
return x_in
def UpperCAmelCase ( self , __a , __a , __a , __a) -> int:
'''simple docstring'''
_UpperCamelCase = x.shape[0]
_UpperCamelCase = None
for i in tqdm.tqdm(self.scheduler.timesteps):
# create batch of timesteps to pass into model
_UpperCamelCase = torch.full((batch_size,) , __a , device=self.unet.device , dtype=torch.long)
for _ in range(__a):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
_UpperCamelCase = self.value_function(x.permute(0 , 2 , 1) , __a).sample
_UpperCamelCase = torch.autograd.grad([y.sum()] , [x])[0]
_UpperCamelCase = self.scheduler._get_variance(__a)
_UpperCamelCase = torch.exp(0.5 * posterior_variance)
_UpperCamelCase = model_std * grad
_UpperCamelCase = 0
_UpperCamelCase = x.detach()
_UpperCamelCase = x + scale * grad
_UpperCamelCase = self.reset_xa(__a , __a , self.action_dim)
_UpperCamelCase = self.unet(x.permute(0 , 2 , 1) , __a).sample.permute(0 , 2 , 1)
# TODO: verify deprecation of this kwarg
_UpperCamelCase = self.scheduler.step(__a , __a , __a , predict_epsilon=__a)['''prev_sample''']
# apply conditions to the trajectory (set the initial state)
_UpperCamelCase = self.reset_xa(__a , __a , self.action_dim)
_UpperCamelCase = self.to_torch(__a)
return x, y
def __call__( self , __a , __a=64 , __a=32 , __a=2 , __a=0.1) -> Optional[Any]:
'''simple docstring'''
# normalize the observations and create batch dimension
_UpperCamelCase = self.normalize(__a , '''observations''')
_UpperCamelCase = obs[None].repeat(__a , axis=0)
_UpperCamelCase = {0: self.to_torch(__a)}
_UpperCamelCase = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
_UpperCamelCase = randn_tensor(__a , device=self.unet.device)
_UpperCamelCase = self.reset_xa(__a , __a , self.action_dim)
_UpperCamelCase = self.to_torch(__a)
# run the diffusion process
_UpperCamelCase , _UpperCamelCase = self.run_diffusion(__a , __a , __a , __a)
# sort output trajectories by value
_UpperCamelCase = y.argsort(0 , descending=__a).squeeze()
_UpperCamelCase = x[sorted_idx]
_UpperCamelCase = sorted_values[:, :, : self.action_dim]
_UpperCamelCase = actions.detach().cpu().numpy()
_UpperCamelCase = self.de_normalize(__a , key='''actions''')
# select the action with the highest value
if y is not None:
_UpperCamelCase = 0
else:
# if we didn't run value guiding, select a random action
_UpperCamelCase = np.random.randint(0 , __a)
_UpperCamelCase = denorm_actions[selected_index, 0]
return denorm_actions
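# Hedged usage sketch (assumed names: the gym env id, checkpoint id, and the
# positional-argument order come from the diffusers value-guided RL example, not
# from this file):
#   import gym
#   env = gym.make("hopper-medium-v2")
#   pipeline = _UpperCAmelCase.from_pretrained(
#       "bglick13/hopper-medium-v2-value-function-hor32", env=env
#   )
#   obs = env.reset()
#   action = pipeline(obs, 64, 32, 2, 0.1)  # batch_size, planning_horizon, n_guide_steps, scale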
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase__ = StableDiffusionPanoramaPipeline
lowercase__ = TEXT_TO_IMAGE_PARAMS
lowercase__ = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowercase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
torch.manual_seed(0)
_UpperCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
_UpperCamelCase = DDIMScheduler()
torch.manual_seed(0)
_UpperCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0)
_UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
_UpperCamelCase = CLIPTextModel(__a)
_UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
_UpperCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCAmelCase ( self , __a , __a=0) -> int:
'''simple docstring'''
_UpperCamelCase = torch.manual_seed(__a)
_UpperCamelCase = {
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
# Setting height and width to None to prevent OOMs on CPU.
'''height''': None,
'''width''': None,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = StableDiffusionPanoramaPipeline(**__a)
_UpperCamelCase = sd_pipe.to(__a)
sd_pipe.set_progress_bar_config(disable=__a)
_UpperCamelCase = self.get_dummy_inputs(__a)
_UpperCamelCase = sd_pipe(**__a).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCamelCase = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
super().test_inference_batch_consistent(batch_sizes=[1, 2])
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25e-3)
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = StableDiffusionPanoramaPipeline(**__a)
_UpperCamelCase = sd_pipe.to(__a)
sd_pipe.set_progress_bar_config(disable=__a)
_UpperCamelCase = self.get_dummy_inputs(__a)
_UpperCamelCase = '''french fries'''
_UpperCamelCase = sd_pipe(**__a , negative_prompt=__a)
_UpperCamelCase = output.images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCamelCase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = StableDiffusionPanoramaPipeline(**__a)
_UpperCamelCase = sd_pipe.to(__a)
sd_pipe.set_progress_bar_config(disable=__a)
_UpperCamelCase = self.get_dummy_inputs(__a)
_UpperCamelCase = sd_pipe(**__a , view_batch_size=2)
_UpperCamelCase = output.images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCamelCase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''')
_UpperCamelCase = StableDiffusionPanoramaPipeline(**__a)
_UpperCamelCase = sd_pipe.to(__a)
sd_pipe.set_progress_bar_config(disable=__a)
_UpperCamelCase = self.get_dummy_inputs(__a)
_UpperCamelCase = sd_pipe(**__a).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCamelCase = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = PNDMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , skip_prk_steps=__a)
_UpperCamelCase = StableDiffusionPanoramaPipeline(**__a)
_UpperCamelCase = sd_pipe.to(__a)
sd_pipe.set_progress_bar_config(disable=__a)
_UpperCamelCase = self.get_dummy_inputs(__a)
_UpperCamelCase = sd_pipe(**__a).images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCamelCase = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class _UpperCAmelCase( unittest.TestCase ):
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self , __a=0) -> List[str]:
'''simple docstring'''
_UpperCamelCase = torch.manual_seed(__a)
_UpperCamelCase = {
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = '''stabilityai/stable-diffusion-2-base'''
_UpperCamelCase = DDIMScheduler.from_pretrained(__a , subfolder='''scheduler''')
_UpperCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(__a , scheduler=__a , safety_checker=__a)
pipe.to(__a)
pipe.set_progress_bar_config(disable=__a)
pipe.enable_attention_slicing()
_UpperCamelCase = self.get_inputs()
_UpperCamelCase = pipe(**__a).images
_UpperCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 20_48, 3)
_UpperCamelCase = np.array(
[
0.3696_8392,
0.2702_5372,
0.3244_6766,
0.2837_9387,
0.3636_3274,
0.3073_3347,
0.2710_0027,
0.2705_4125,
0.2553_6096,
])
assert np.abs(expected_slice - image_slice).max() < 1e-2
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-base''' , safety_checker=__a)
_UpperCamelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.to(__a)
pipe.set_progress_bar_config(disable=__a)
pipe.enable_attention_slicing()
_UpperCamelCase = self.get_inputs()
_UpperCamelCase = pipe(**__a).images
_UpperCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 20_48, 3)
_UpperCamelCase = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
])
assert np.abs(expected_slice - image_slice).max() < 1e-3
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = 0
def callback_fn(__a , __a , __a) -> None:
_UpperCamelCase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
_UpperCamelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
_UpperCamelCase = latents[0, -3:, -3:, -1]
_UpperCamelCase = np.array(
[
0.1868_1869,
0.3390_7816,
0.536_1276,
0.1443_2865,
-0.0285_6611,
-0.7394_1123,
0.2339_7987,
0.4732_2682,
-0.3782_3164,
])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
elif step == 2:
_UpperCamelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
_UpperCamelCase = latents[0, -3:, -3:, -1]
_UpperCamelCase = np.array(
[
0.1853_9645,
0.3398_7248,
0.537_8559,
0.1443_7142,
-0.0245_5261,
-0.733_8317,
0.2399_0755,
0.4735_6272,
-0.378_6505,
])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
_UpperCamelCase = False
_UpperCamelCase = '''stabilityai/stable-diffusion-2-base'''
_UpperCamelCase = DDIMScheduler.from_pretrained(__a , subfolder='''scheduler''')
_UpperCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(__a , scheduler=__a , safety_checker=__a)
_UpperCamelCase = pipe.to(__a)
pipe.set_progress_bar_config(disable=__a)
pipe.enable_attention_slicing()
_UpperCamelCase = self.get_inputs()
pipe(**__a , callback=__a , callback_steps=1)
assert callback_fn.has_been_called
assert number_of_steps == 3
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCamelCase = '''stabilityai/stable-diffusion-2-base'''
_UpperCamelCase = DDIMScheduler.from_pretrained(__a , subfolder='''scheduler''')
_UpperCamelCase = StableDiffusionPanoramaPipeline.from_pretrained(__a , scheduler=__a , safety_checker=__a)
_UpperCamelCase = pipe.to(__a)
pipe.set_progress_bar_config(disable=__a)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
_UpperCamelCase = self.get_inputs()
_UpperCamelCase = pipe(**__a)
_UpperCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 5.2 GB is allocated
assert mem_bytes < 5.5 * 10**9
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowercase__ : List[Any] = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def dep_version_check(pkg , hint=None ):
    require_version(deps[pkg] , hint)
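# Hedged usage sketch: validate one pinned runtime dependency by name.
#   dep_version_check("tqdm")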
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(UpperCamelCase__ ) , """Tatoeba directory does not exist.""" )
class a__ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase_ ( self ) -> List[str]:
'''simple docstring'''
a = tempfile.mkdtemp()
return TatoebaConverter(save_dir=A )
@slow
def lowerCAmelCase_ ( self ) -> int:
'''simple docstring'''
self.resolver.convert_models(["heb-eng"] )
@slow
def lowerCAmelCase_ ( self ) -> Dict:
'''simple docstring'''
a , a = self.resolver.write_model_card("opus-mt-he-en" , dry_run=A )
assert mmeta["long_pair"] == "heb-eng"
"""simple docstring"""
import logging
import os
from .state import PartialState
class __lowerCAmelCase ( logging.LoggerAdapter ):
'''simple docstring'''
    @staticmethod
    def _should_log(main_process_only ):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)
    def log( self , level , msg , *args , **kwargs ):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                '''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
        main_process_only = kwargs.pop('''main_process_only''' , True )
        in_order = kwargs.pop('''in_order''' , False )
        if self.isEnabledFor(level ):
            if self._should_log(main_process_only ):
                msg , kwargs = self.process(msg , kwargs )
                self.logger.log(level , msg , *args , **kwargs )
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes ):
                    if i == state.process_index:
                        msg , kwargs = self.process(msg , kwargs )
                        self.logger.log(level , msg , *args , **kwargs )
                    state.wait_for_everyone()
def lowercase ( name : str , log_level : str = None ):
    if log_level is None:
        log_level = os.environ.get('''ACCELERATE_LOG_LEVEL''' , None )
    logger = logging.getLogger(name )
    if log_level is not None:
        logger.setLevel(log_level.upper() )
        logger.root.setLevel(log_level.upper() )
    return MultiProcessAdapter(logger , {} )
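# Hedged usage sketch (this helper mirrors accelerate's `get_logger`):
#   logger = lowercase(__name__, log_level="INFO")
#   logger.info("main process only", main_process_only=True)
#   logger.debug("every process, in rank order", main_process_only=False, in_order=True)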
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , ):
__a = parent
__a = 13
__a = 7
__a = True
__a = True
__a = True
__a = 99
__a = 32
__a = 2
__a = 4
__a = 37
__a = '''gelu'''
__a = 0.1
__a = 0.1
__a = 512
__a = 16
__a = 2
__a = 0.02
__a = 3
__a = 4
__a = None
def __UpperCAmelCase ( self ):
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = None
if self.use_input_mask:
__a = random_attention_mask([self.batch_size, self.seq_length] )
__a = None
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a = ids_tensor([self.batch_size] , self.num_choices )
__a = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self ):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a ):
__a = TFEsmModel(config=_a )
__a = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__a = model(_a )
__a = [input_ids, input_mask]
__a = model(_a )
__a = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , _a , ):
__a = True
__a = TFEsmModel(config=_a )
__a = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''encoder_hidden_states''': encoder_hidden_states,
'''encoder_attention_mask''': encoder_attention_mask,
}
__a = model(_a )
__a = [input_ids, input_mask]
__a = model(_a , encoder_hidden_states=_a )
# Also check the case where encoder outputs are not passed
__a = model(_a , attention_mask=_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a ):
__a = TFEsmForMaskedLM(config=_a )
__a = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a ):
__a = self.num_labels
__a = TFEsmForTokenClassification(config=_a )
__a = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__a = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : int = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
__UpperCAmelCase : Tuple = (
{
'feature-extraction': TFEsmModel,
'fill-mask': TFEsmForMaskedLM,
'text-classification': TFEsmForSequenceClassification,
'token-classification': TFEsmForTokenClassification,
'zero-shot': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : Union[str, Any] = False
def __UpperCAmelCase ( self ):
__a = TFEsmModelTester(self )
__a = ConfigTester(self , config_class=_a , hidden_size=37 )
def __UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_a )
@slow
def __UpperCAmelCase ( self ):
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = TFEsmModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@unittest.skip('''Protein models do not support embedding resizing.''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip('''Protein models do not support embedding resizing.''' )
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(_a )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
__a = model.get_bias()
assert isinstance(_a , _a )
for k, v in name.items():
assert isinstance(_a , tf.Variable )
else:
__a = model.get_output_embeddings()
assert x is None
__a = model.get_bias()
assert name is None
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self ):
__a = TFEsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
__a = tf.constant([[0, 1, 2, 3, 4, 5]] )
__a = model(_a )[0]
__a = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , _a )
# compare the actual values for a slice.
__a = tf.constant(
[
[
[8.92_1518, -10.58_9814, -6.467_1307],
[-6.396_7156, -13.91_1377, -1.121_1915],
[-7.78_1247, -13.95_1557, -3.74_0592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
@slow
def __UpperCAmelCase ( self ):
__a = TFEsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
__a = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
__a = model(_a )[0]
# compare the actual values for a slice.
__a = tf.constant(
[
[
[0.1444_3092, 0.5412_5327, 0.324_7739],
[0.3034_0484, 0.0052_6676, 0.3107_7722],
[0.3227_8043, -0.2498_7096, 0.341_4628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_data2vec_audio''': ['''DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Data2VecAudioConfig'''],
'''configuration_data2vec_text''': [
'''DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecTextConfig''',
'''Data2VecTextOnnxConfig''',
],
'''configuration_data2vec_vision''': [
'''DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecVisionConfig''',
'''Data2VecVisionOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_data2vec_audio'''] = [
'''DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecAudioForAudioFrameClassification''',
'''Data2VecAudioForCTC''',
'''Data2VecAudioForSequenceClassification''',
'''Data2VecAudioForXVector''',
'''Data2VecAudioModel''',
'''Data2VecAudioPreTrainedModel''',
]
    _import_structure['''modeling_data2vec_text'''] = [
'''DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecTextForCausalLM''',
'''Data2VecTextForMaskedLM''',
'''Data2VecTextForMultipleChoice''',
'''Data2VecTextForQuestionAnswering''',
'''Data2VecTextForSequenceClassification''',
'''Data2VecTextForTokenClassification''',
'''Data2VecTextModel''',
'''Data2VecTextPreTrainedModel''',
]
    _import_structure['''modeling_data2vec_vision'''] = [
'''DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecVisionForImageClassification''',
'''Data2VecVisionForMaskedImageModeling''',
'''Data2VecVisionForSemanticSegmentation''',
'''Data2VecVisionModel''',
'''Data2VecVisionPreTrainedModel''',
]
if is_tf_available():
    _import_structure['''modeling_tf_data2vec_vision'''] = [
'''TFData2VecVisionForImageClassification''',
'''TFData2VecVisionForSemanticSegmentation''',
'''TFData2VecVisionModel''',
'''TFData2VecVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
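# Hedged note: with the lazy module registered above, heavyweight submodules are
# imported only on first attribute access, e.g. (assumed package path):
#   from transformers.models.data2vec import Data2VecAudioConfig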
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'''
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class SCREAMING_SNAKE_CASE__ (PretrainedConfig ):
__lowerCamelCase : List[Any] = """speech_to_text_2"""
__lowerCamelCase : str = ["""past_key_values"""]
__lowerCamelCase : List[Any] = {"""num_attention_heads""": """decoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__( self , vocab_size=1_0000 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=4 , decoder_layerdrop=0.0 , use_cache=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_target_positions=1024 , **kwargs , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
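# Hedged usage sketch: `hidden_size` resolves to `d_model` through `attribute_map`.
#   config = SCREAMING_SNAKE_CASE__()
#   assert config.hidden_size == config.d_model == 256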
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class a (unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self : Optional[int] ) -> Tuple:
__snake_case : Tuple = tempfile.mkdtemp()
__snake_case : int = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
__snake_case : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
__snake_case : Any = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
"image_std": [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
__snake_case : Any = os.path.join(self.tmpdirname , lowerCamelCase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(lowerCamelCase , lowerCamelCase )
def __snake_case ( self : Optional[int] , **lowerCamelCase : int ) -> Dict:
return BertTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase )
def __snake_case ( self : str , **lowerCamelCase : Union[str, Any] ) -> Any:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase )
def __snake_case ( self : Dict , **lowerCamelCase : Dict ) -> List[str]:
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase )
def __snake_case ( self : Optional[int] ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def __snake_case ( self : Union[str, Any] ) -> Optional[int]:
__snake_case : Optional[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__snake_case : int = [Image.fromarray(np.moveaxis(lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __snake_case ( self : str ) -> Optional[Any]:
__snake_case : Optional[Any] = self.get_tokenizer()
__snake_case : Tuple = self.get_rust_tokenizer()
__snake_case : Union[str, Any] = self.get_image_processor()
__snake_case : List[str] = AlignProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
__snake_case : str = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCamelCase )
__snake_case : List[str] = AlignProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
__snake_case : str = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , lowerCamelCase )
def __snake_case ( self : str ) -> int:
__snake_case : Union[str, Any] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__snake_case : Tuple = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
__snake_case : List[Any] = self.get_image_processor(do_normalize=lowerCamelCase , padding_value=1.0 )
__snake_case : List[str] = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase )
def __snake_case ( self : Union[str, Any] ) -> Tuple:
__snake_case : Optional[int] = self.get_image_processor()
__snake_case : Optional[int] = self.get_tokenizer()
__snake_case : Any = AlignProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__snake_case : Optional[Any] = self.prepare_image_inputs()
__snake_case : Tuple = image_processor(lowerCamelCase , return_tensors="np" )
__snake_case : str = processor(images=lowerCamelCase , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __snake_case ( self : Union[str, Any] ) -> Any:
__snake_case : str = self.get_image_processor()
__snake_case : Tuple = self.get_tokenizer()
__snake_case : Dict = AlignProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__snake_case : Optional[int] = "lower newer"
__snake_case : str = processor(text=lowerCamelCase )
__snake_case : List[str] = tokenizer(lowerCamelCase , padding="max_length" , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __snake_case ( self : List[str] ) -> List[Any]:
__snake_case : int = self.get_image_processor()
__snake_case : Tuple = self.get_tokenizer()
__snake_case : Optional[Any] = AlignProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__snake_case : List[Any] = "lower newer"
__snake_case : Tuple = self.prepare_image_inputs()
__snake_case : Any = processor(text=lowerCamelCase , images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def __snake_case ( self : List[str] ) -> Any:
__snake_case : List[Any] = self.get_image_processor()
__snake_case : Tuple = self.get_tokenizer()
__snake_case : List[str] = AlignProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__snake_case : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__snake_case : Optional[Any] = processor.batch_decode(lowerCamelCase )
__snake_case : List[str] = tokenizer.batch_decode(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
def __snake_case ( self : List[Any] ) -> List[str]:
__snake_case : Any = self.get_image_processor()
__snake_case : Optional[Any] = self.get_tokenizer()
__snake_case : Optional[Any] = AlignProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__snake_case : str = "lower newer"
__snake_case : Union[str, Any] = self.prepare_image_inputs()
__snake_case : List[str] = processor(text=lowerCamelCase , images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
_snake_case : Any = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class a (_lowerCAmelCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCamelCase : int = 101 ) -> Dict:
__snake_case : str = length
def __len__( self : Optional[Any] ) -> Any:
return self.length
def __getitem__( self : int , lowerCamelCase : Optional[Any] ) -> int:
return i
class a :
"""simple docstring"""
def __call__( self : List[Any] , lowerCamelCase : Any ) -> Tuple:
return {"input_ids": torch.tensor(lowerCamelCase ), "labels": torch.tensor(lowerCamelCase )}
class a (nn.Module ):
"""simple docstring"""
def __init__( self : Optional[Any] ) -> str:
super().__init__()
# Add some (unused) params otherwise DDP will complain.
__snake_case : Optional[Any] = nn.Linear(120 , 80 )
def __snake_case ( self : int , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any]=None ) -> Optional[Any]:
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device ), input_ids
else:
return input_ids
class a (_lowerCAmelCase ):
"""simple docstring"""
@require_torch_neuroncore
def __snake_case ( self : Union[str, Any] ) -> Optional[Any]:
__snake_case : Dict = F'--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
__snake_case : Optional[int] = self.get_auto_remove_tmp_dir()
__snake_case : int = F'--output_dir {output_dir}'.split()
__snake_case : str = ["torchrun"] + distributed_args + args
execute_subprocess_async(lowerCamelCase , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class a (_lowerCAmelCase ):
"""simple docstring"""
@require_torch_multi_gpu
def __snake_case ( self : str ) -> int:
__snake_case : List[str] = F'--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
__snake_case : Optional[int] = self.get_auto_remove_tmp_dir()
__snake_case : int = F'--output_dir {output_dir}'.split()
__snake_case : Optional[int] = ["torchrun"] + distributed_args + args
execute_subprocess_async(lowerCamelCase , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
_snake_case : Optional[int] = HfArgumentParser((TrainingArguments,))
_snake_case : int = parser.parse_args_into_dataclasses()[0]
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
f'''distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'''
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
    dataset = DummyDataset(dataset_length)
    def compute_metrics(p: EvalPrediction) -> Dict:
        sequential = list(range(len(dataset ) ) )
        success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
"Predictions and/or labels do not match expected results:\n - predictions: "
F'{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}' )
return {"success": success}
    trainer = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
    metrics = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
    p = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
    trainer.args.eval_accumulation_steps = 2
    metrics = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
    p = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
    trainer.args.eval_accumulation_steps = None
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance( emb_a, emb_b, eps=1e-12 ):
    """simple docstring"""
    norm_emb_a = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(emb_a, axis=1 ), a_min=eps ) ).T
    norm_emb_b = jnp.divide(emb_b.T, jnp.clip(jnp.linalg.norm(emb_b, axis=1 ), a_min=eps ) ).T
    return jnp.matmul(norm_emb_a, norm_emb_b.T )
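# Quick sanity sketch (assumed input, not from the original file): a row compared
# against itself has cosine similarity 1 after normalization.
#   e = jnp.array([[3.0, 4.0]])
#   assert jnp.allclose(jax_cosine_distance(e, e), 1.0)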
class FlaxStableDiffusionSafetyCheckerModule( nn.Module ):
A : CLIPConfig
A : jnp.dtype = jnp.floataa
    def setup( self ):
lowercase : Optional[Any] = FlaxCLIPVisionModule(self.config.vision_config )
lowercase : str = nn.Dense(self.config.projection_dim , use_bias=SCREAMING_SNAKE_CASE__ , dtype=self.dtype )
lowercase : Optional[Any] = self.param('''concept_embeds''' , jax.nn.initializers.ones , (17, self.config.projection_dim) )
lowercase : List[str] = self.param(
'''special_care_embeds''' , jax.nn.initializers.ones , (3, self.config.projection_dim) )
lowercase : int = self.param('''concept_embeds_weights''' , jax.nn.initializers.ones , (17,) )
lowercase : Any = self.param('''special_care_embeds_weights''' , jax.nn.initializers.ones , (3,) )
def __call__( self , SCREAMING_SNAKE_CASE__ ):
lowercase : Tuple = self.vision_model(SCREAMING_SNAKE_CASE__ )[1]
lowercase : List[Any] = self.visual_projection(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = jax_cosine_distance(SCREAMING_SNAKE_CASE__ , self.special_care_embeds )
lowercase : List[str] = jax_cosine_distance(SCREAMING_SNAKE_CASE__ , self.concept_embeds )
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign image inputs
lowercase : Dict = 0.0
lowercase : Union[str, Any] = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
lowercase : int = jnp.round(SCREAMING_SNAKE_CASE__ , 3 )
lowercase : Dict = jnp.any(special_scores > 0 , axis=1 , keepdims=SCREAMING_SNAKE_CASE__ )
# Use a lower threshold if an image has any special care concept
lowercase : Union[str, Any] = is_special_care * 0.01
lowercase : Any = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
lowercase : str = jnp.round(SCREAMING_SNAKE_CASE__ , 3 )
lowercase : List[Any] = jnp.any(concept_scores > 0 , axis=1 )
return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker( FlaxPreTrainedModel ):
A : int = CLIPConfig
A : Optional[Any] = 'clip_input'
A : Any = FlaxStableDiffusionSafetyCheckerModule
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 0 , SCREAMING_SNAKE_CASE__ = jnp.floataa , SCREAMING_SNAKE_CASE__ = True , **SCREAMING_SNAKE_CASE__ , ):
if input_shape is None:
lowercase : Tuple = (1, 224, 224, 3)
lowercase : Any = self.module_class(config=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , input_shape=SCREAMING_SNAKE_CASE__ , seed=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ , _do_init=_do_init )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
# init input tensor
lowercase : str = jax.random.normal(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase , lowercase : str = jax.random.split(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = {'''params''': params_rng, '''dropout''': dropout_rng}
lowercase : Dict = self.module.init(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )['''params''']
return random_params
def __call__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , ):
lowercase : Tuple = jnp.transpose(SCREAMING_SNAKE_CASE__ , (0, 2, 3, 1) )
return self.module.apply(
{'''params''': params or self.params} , jnp.array(SCREAMING_SNAKE_CASE__ , dtype=jnp.floataa ) , rngs={} , )
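# Hedged usage sketch (assumed checkpoint id; `pixel_values` is an NCHW float
# array as expected by __call__ above):
#   checker = FlaxStableDiffusionSafetyChecker.from_pretrained(
#       "CompVis/stable-diffusion-safety-checker"
#   )
#   has_nsfw = checker(pixel_values, params=checker.params)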
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_( state_dict ):
    """simple docstring"""
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''encoder.embed_positions._float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None )
def make_linear_from_emb( emb ):
    """simple docstring"""
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mamaaa_checkpoint_from_disk( checkpoint_path ):
    """simple docstring"""
    mam_aaa = torch.load(checkpoint_path, map_location='''cpu''' )
    args = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
    state_dict = mam_aaa['''model''']
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict['''encoder.embed_tokens.weight'''].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size, max_position_embeddings=1024, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='''relu''', )
    state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
    model = MaMaaaForConditionalGeneration(config )
    model.model.load_state_dict(state_dict, strict=False )
    model.lm_head = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
args = parser.parse_args()
model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
"""simple docstring"""
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__UpperCamelCase : Optional[Any] = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def rename_keys( s_dict ):
    keys = list(s_dict.keys() )
    for key in keys:
        layer_to_block_of_layer = r'''.*/layers_(\d+)'''
        new_key = key
        if re.match(layer_to_block_of_layer , key ):
            new_key = re.sub(r'''layers_(\d+)''' , r'''block/\1/layer''' , new_key )
        layer_to_block_of_layer = r'''(encoder|decoder)\/'''
        if re.match(layer_to_block_of_layer , key ):
            groups = re.match(layer_to_block_of_layer , new_key ).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r'''/mlp/''' , r'''/1/mlp/''' , new_key )
                new_key = re.sub(r'''/pre_mlp_layer_norm/''' , r'''/1/layer_norm/''' , new_key )
            elif groups[0] == "decoder":
                new_key = re.sub(r'''/mlp/''' , r'''/2/mlp/''' , new_key )
                new_key = re.sub(r'''/pre_mlp_layer_norm/''' , r'''/2/layer_norm/''' , new_key )
        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key , temp_key )
        print(f'{key} -> {new_key}' )
        s_dict[new_key] = s_dict.pop(key )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
lowerCAmelCase__ : Dict = s_dict[
'''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
lowerCAmelCase__ : Optional[int] = s_dict[
'''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys() ):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts ):
                new_expert_key = key.replace('''expert/''' , f'experts/expert_{idx}/' )
                s_dict[new_expert_key] = expert_weights[idx]
                print(f'{key} -> {new_expert_key}' )
            s_dict.pop(key )
    return s_dict
__UpperCamelCase : Optional[Any] = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def convert_gin_to_config( gin_file , num_experts ):
    import regex as re
    with open(gin_file , '''r''' ) as f:
        raw_gin = f.read()
    regex_match = re.findall(r'''(.*) = ([0-9.]*)''' , raw_gin )
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value ) if '''.''' in value else int(value )
    activation = re.findall(r'''(.*activations) = \(\'(.*)\',\)''' , raw_gin )[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1] )
    args['''num_experts'''] = num_experts
    config = SwitchTransformersConfig(**args )
    return config
def convert_flax_checkpoint_to_pytorch( flax_checkpoint_path , config_file , gin_file=None , pytorch_dump_path="./" , num_experts=8 ):
    print(f'Loading flax weights from : {flax_checkpoint_path}' )
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path )
    if gin_file is not None:
        config = convert_gin_to_config(gin_file , num_experts )
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file )
    pt_model = SwitchTransformersForConditionalGeneration(config )
    flax_params = flax_params['''target''']
    flax_params = flatten_dict(flax_params , sep='''/''' )
    flax_params = rename_keys(flax_params )
    flax_params = unflatten_dict(flax_params , sep='''/''' )
    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model , flax_params )
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    pt_model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
        help=(
            '''Path to the T5X checkpoint of the pre-trained SwitchTransformers model. If a `gin_file` is not'''
            ''' provided, a `config_name` has to be provided.'''
        ),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
    args = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
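# A minimal usage sketch for the converter above. The paths are hypothetical
# placeholders, not files shipped with the script; when `gin_file` is given the
# config is parsed from gin, so `config_name` may be None.
#
#   convert_flax_checkpoint_to_pytorch(
#       flax_checkpoint_path="/tmp/switch_base_8/checkpoint_500000",
#       config_name=None,
#       gin_file="/tmp/switch_base_8/config.gin",
#       pytorch_dump_path="/tmp/switch_base_8_pt",
#       num_experts=8,
#   )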
| 106 |
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    """Compare two TensorProtos for equality, ignoring their names."""
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    """Replace every input called `name` on this node (and its subgraphs) with `new_name`."""
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    """Apply the input rename to every node in the graph."""
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """Drop duplicated initializers and re-point the nodes that referenced them."""
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    """Deduplicate identical initializers in an ONNX model and save the optimized model next to the original."""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:  # FLOAT
                    mem_size *= 4
                elif dtype == 6:  # INT32
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # INT64 / DOUBLE
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)
    return new_model
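# A minimal usage sketch; "model.onnx" below is a hypothetical path to an already
# exported ONNX file, not something this script provides:
#
#   optimized_path = remove_dup_initializers("/tmp/model.onnx")
#   print("optimized model written to", optimized_path)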
| 295 | 0 |
'''simple docstring'''
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def add_newline_to_end_of_each_sentence(x):
    """Re-split `x` into sentences joined by newlines (needed for rougeLsum scoring)."""
    x = re.sub('<n>', '', x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 25 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
"""https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    """Configuration class for the TrajectoryTransformer model."""

    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=100, action_weight=5, reward_weight=1, value_weight=1, block_size=249, action_dim=6, observation_dim=17, transition_dim=25, n_layer=4, n_head=4, n_embd=128, embd_pdrop=0.1, attn_pdrop=0.1, resid_pdrop=0.1, learning_rate=0.0006, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1E-12, kaiming_initializer_range=1, use_cache=True, pad_token_id=1, bos_token_id=50256, eos_token_id=50256, **kwargs):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
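# A minimal usage sketch: build a small config and read an attribute that goes
# through `attribute_map` (here `hidden_size` is mapped onto `n_embd`):
#
#   config = TrajectoryTransformerConfig(n_layer=2, n_head=2, n_embd=64)
#   assert config.hidden_size == 64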
| 25 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(self, parent, batch_size=2, image_size=32, patch_size=16, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, backbone_out_indices=[0, 1, 2, 3], num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, backbone_featmap_shape=[1, 384, 24, 24], is_hybrid=True, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        backbone_config = {
            'global_padding': 'same',
            'layer_type': 'bottleneck',
            'depths': [3, 4, 9],
            'out_features': ['stage1', 'stage2', 'stage3'],
            'embedding_dynamic_padding': True,
            'hidden_sizes': [96, 192, 384, 768],
            'num_groups': 2,
        }
        return DPTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, backbone_out_indices=self.backbone_out_indices, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, is_hybrid=self.is_hybrid, backbone_config=backbone_config, backbone_featmap_shape=self.backbone_featmap_shape)
    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'depth-estimation': DPTForDepthEstimation,
            'feature-extraction': DPTModel,
            'image-segmentation': DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='DPT does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [F"""{name}.{key}""" for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F"""Parameter {name} of model {model_class} seems not properly initialized""")
    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = 'add'
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)


def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained('Intel/dpt-hybrid-midas')
        model = DPTForDepthEstimation.from_pretrained('Intel/dpt-hybrid-midas').to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1E-4))
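# A short post-processing sketch: upsampling the raw depth map back to the input
# resolution, a common next step after the forward pass above. `image` and
# `predicted_depth` follow the names used in the integration test; the exact
# interpolation settings are one reasonable choice, not mandated by the model.
#
#   import torch
#   prediction = torch.nn.functional.interpolate(
#       predicted_depth.unsqueeze(1),
#       size=image.size[::-1],  # PIL size is (width, height); interpolate wants (height, width)
#       mode="bicubic",
#       align_corners=False,
#   ).squeeze()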
| 89 |
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
__A = TypeVar("""KEY""")
__A = TypeVar("""VAL""")
@dataclass(frozen=a , slots=a )
class _lowerCAmelCase ( Generic[KEY, VAL] ):
"""simple docstring"""
__magic_name__ :KEY
__magic_name__ :VAL
class _lowerCAmelCase ( _Item ):
"""simple docstring"""
def __init__( self ):
'''simple docstring'''
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __bool__( self ):
'''simple docstring'''
return False
__A = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL]):
    def __init__(self, initial_block_size=8, capacity_factor=0.75):
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key):
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind):
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind, key, val):
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self):
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self):
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size):
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self):
        self._resize(len(self._buckets) * 2)

    def _size_down(self):
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key):
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key, val):
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break
    def __setitem__(self, key, val):
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key):
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key):
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self):
        return self._len

    def __iter__(self):
        yield from (item.key for item in self._buckets if item)

    def __repr__(self):
        val_string = ', '.join(
            F"{item.key}: {item.val}" for item in self._buckets if item)
        return F"HashMap({val_string})"
| 293 | 0 |
import datasets
_snake_case = "\\n@InProceedings{conneau2018xnli,\n author = \"Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin\",\n title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",\n booktitle = \"Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n location = \"Brussels, Belgium\",\n}\n"
_snake_case = "\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n"
_snake_case = "\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric(\"xnli\")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n"
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION,_KWARGS_DESCRIPTION )
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    """predictions""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32"""),
                    """references""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32"""),
                }), codebase_urls=[], reference_urls=[], format="""numpy""")

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
| 366 |
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix, vector):
    """Solve matrix · x = vector by Gaussian elimination with partial pivoting."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float
    # copy the matrix and the vector into one augmented matrix
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(y_points):
    """Return the minimal-degree polynomial through (1, y_points[0]), (2, y_points[1]), ... as a function."""
    size: int = len(y_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int
    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix, vector)

    def interpolated_func(var) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size))

    return interpolated_func
def question_function(variable):
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution(func=question_function, order=10):
    """Sum the first incorrect terms of the optimum polynomials for `func` (Project Euler 101)."""
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(f"""{solution() = }""")
| 343 | 0 |