| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}


class MobileNetV1Config(PretrainedConfig):
    """Configuration class to store the configuration of a MobileNetV1 model."""

    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
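
# Usage sketch (not part of the original module): constructing the config standalone.
# Assumes only that the public `transformers` API exposes `MobileNetV1Config`.
from transformers import MobileNetV1Config

config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
print(config.hidden_act)      # "relu6" (default)
print(config.layer_norm_eps)  # 0.001 (default)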
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFXLMRobertaModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")

        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
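
# Usage sketch (not part of the original test file): the same shape check can be reproduced
# outside the unittest harness. Assumes TensorFlow is installed and the
# "jplu/tf-xlm-roberta-base" checkpoint is reachable.
import tensorflow as tf
from transformers import TFXLMRobertaModel

model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")
features = {"input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32)}
print(model(features)["last_hidden_state"].shape)  # (1, 6, 768)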
from typing import Optional, Tuple, Union

import tensorflow as tf

from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
    TFBaseModelOutputWithNoAttention,
    TFBaseModelOutputWithPoolingAndNoAttention,
    TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]


class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """RegNet embeddings (stem), composed of a single aggressive convolution."""

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class TFRegNetShortCut(tf.keras.layers.Layer):
    """
    RegNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
    downsample the input using `stride=2`.
    """

    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)


class TFRegNetSELayer(tf.keras.layers.Layer):
    """Squeeze-and-Excitation layer (SE) as proposed in https://arxiv.org/abs/1709.01507."""

    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state


class TFRegNetXLayer(tf.keras.layers.Layer):
    """RegNet's layer composed of three convolutions, same as a ResNet bottleneck layer with reduction = 1."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetYLayer(tf.keras.layers.Layer):
    """RegNet's Y layer: an X layer with Squeeze and Excitation."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetStage(tf.keras.layers.Layer):
    """A RegNet stage composed of stacked layers."""

    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state


class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(
        self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format to have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )


class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}


REGNET_START_DOCSTRING = r"""
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )


@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: tf.Tensor = None,
        labels: tf.Tensor = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
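
# Usage sketch (not part of the original module): image classification with the checkpoint
# named in the docstrings above. Assumes network access to "facebook/regnet-y-040" and that
# PIL and requests are installed.
import requests
import tensorflow as tf
from PIL import Image
from transformers import AutoImageProcessor, TFRegNetForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

inputs = processor(image, return_tensors="tf")
logits = model(**inputs).logits
predicted_label = int(tf.math.argmax(logits, axis=-1)[0])
print(model.config.id2label[predicted_label])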
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
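
# Note: the routine above applies len(data) random transpositions, which does not yield a
# uniform permutation. A sketch of the classic, unbiased Fisher-Yates shuffle for comparison
# (assumption: shuffling the list in place is acceptable):
def fisher_yates_shuffle_unbiased(data: list) -> list:
    # walk from the last index down, swapping each element with a uniformly
    # chosen element at or before its position
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data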
"""simple docstring"""
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list for 0-1 BFS (edge weights restricted to 0 or 1)."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self):
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # weight-0 edges go to the front of the deque, weight-1 edges to the back
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
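
# Usage sketch (illustrative, not part of the original module): a 4-vertex graph where both
# routes from 0 to 2 cost 1, using the classes defined above.
graph = AdjacencyList(4)
graph.add_edge(0, 1, 0)
graph.add_edge(1, 2, 1)
graph.add_edge(0, 3, 1)
graph.add_edge(3, 2, 0)
print(graph.get_shortest_path(0, 2))  # 1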
import os
from typing import Optional

import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE


class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read contents of a compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        """
        The compressed file system can be instantiated from any compressed file. It reads the contents of the file as
        a filesystem with a single file inside, named after the compressed file without its compression extension.

        Args:
            fo (`str`): Path to compressed file. Will fetch the file using `fsspec.open()`.
            target_protocol (`str`, optional): To override the FS protocol inferred from a URL.
            target_options (`dict`, optional): Kwargs passed when instantiating the target FS.
        """
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    """Read contents of a BZ2 file as a filesystem with one file inside."""

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a GZIP file as a filesystem with one file inside."""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    """Read contents of an LZ4 file as a filesystem with one file inside."""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    """Read contents of an XZ (LZMA) file as a filesystem with one file inside."""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a Zstandard file as a filesystem with one file inside."""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
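
# Usage sketch (not part of the original module): these filesystems are meant to be registered
# with fsspec, after which the standard protocol-chaining syntax works. Assumes a local
# "file.txt.gz" exists.
import fsspec

fsspec.register_implementation("gzip", GzipFileSystem, clobber=True)
with fsspec.open("gzip://file.txt::file://file.txt.gz", mode="rb") as f:
    print(f.read())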
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}


class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
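
# Usage sketch (not part of the original module): with the defaults above, `feature_size`
# works out to input_size * len(lags_sequence) + _number_of_features
# = 1 * 7 + (0 + 0 + 0 + 0 + 1 * 2) = 9.
config = TimeSeriesTransformerConfig(prediction_length=24)
print(config.feature_size)  # 9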
from collections import OrderedDict

from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES


logger = logging.get_logger(__name__)

FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
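
# Note (illustrative, not part of the original script): OpenCV ships a built-in generator with
# the same parameters. It will not match the hand-rolled kernel exactly because of the odd-size
# adjustment above, but it is a useful cross-check.
import cv2

builtin = cv2.getGaborKernel((10, 10), sigma=8, theta=0, lambd=10, gamma=0, psi=0)
print(builtin.shape)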
from __future__ import annotations

import math

import numpy as np
from numpy.linalg import norm


def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Calculates the euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    """For each vector in value_array, finds the nearest vector in dataset and its distance."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Calculates the cosine similarity between two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
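
# Usage sketch (not part of the original module): nearest neighbour of a single query vector,
# using the functions defined above.
dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
queries = np.array([[0.9, 1.1]])
print(similarity_search(dataset, queries))  # [[[1.0, 1.0], 0.1414...]]
print(cosine_similarity(np.array([1.0, 2.0]), np.array([6.0, 32.0])))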
from __future__ import annotations

from fractions import Fraction


def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
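
# Worked check (not part of the original solution): the four non-trivial digit-cancelling
# fractions are 16/64, 19/95, 26/65 and 49/98; their product reduces to 1/100, so the
# expected result is 100.
assert solution() == 100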
'''simple docstring'''
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ : Dict = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class lowerCamelCase_ ( snake_case_ , unittest.TestCase ):
_lowerCAmelCase : Union[str, Any] = DebertaVaTokenizer
_lowerCAmelCase : List[Any] = DebertaVaTokenizerFast
_lowerCAmelCase : Optional[Any] = True
_lowerCAmelCase : int = True
def __lowercase ( self : Optional[Any] ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE : Tuple = DebertaVaTokenizer(lowerCAmelCase__ , unk_token='''<unk>''' )
tokenizer.save_pretrained(self.tmpdirname )
def __lowercase ( self : List[str] , lowerCAmelCase__ : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = '''this is a test'''
SCREAMING_SNAKE_CASE : Tuple = '''this is a test'''
return input_text, output_text
def __lowercase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = '''<pad>'''
SCREAMING_SNAKE_CASE : Tuple = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ )
def __lowercase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''[PAD]''' )
self.assertEqual(len(lowerCAmelCase__ ) , 3_00_01 )
def __lowercase ( self : str ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 3_00_00 )
def __lowercase ( self : List[str] ):
"""simple docstring"""
# fmt: off
SCREAMING_SNAKE_CASE : int = ''' \tHeLLo!how \n Are yoU? '''
SCREAMING_SNAKE_CASE : Optional[Any] = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?''']
# fmt: on
SCREAMING_SNAKE_CASE : Any = DebertaVaTokenizer(lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Dict = DebertaVaTokenizerFast(lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def __lowercase ( self : Dict ):
"""simple docstring"""
pass
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def __lowercase ( self : str ):
"""simple docstring"""
pass
def __lowercase ( self : int ):
"""simple docstring"""
# fmt: off
SCREAMING_SNAKE_CASE : List[Any] = '''I was born in 92000, and this is falsé.'''
SCREAMING_SNAKE_CASE : Any = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
SCREAMING_SNAKE_CASE : Dict = DebertaVaTokenizer(lowerCAmelCase__ , split_by_punct=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = DebertaVaTokenizerFast(lowerCAmelCase__ , split_by_punct=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __lowercase ( self : Optional[Any] ):
"""simple docstring"""
# fmt: off
SCREAMING_SNAKE_CASE : Optional[int] = '''I was born in 92000, and this is falsé.'''
SCREAMING_SNAKE_CASE : List[str] = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
SCREAMING_SNAKE_CASE : Union[str, Any] = DebertaVaTokenizer(lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , split_by_punct=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : int = DebertaVaTokenizerFast(lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , split_by_punct=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __lowercase ( self : Any ):
"""simple docstring"""
# fmt: off
SCREAMING_SNAKE_CASE : List[Any] = '''I was born in 92000, and this is falsé.'''
SCREAMING_SNAKE_CASE : Dict = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
SCREAMING_SNAKE_CASE : Tuple = DebertaVaTokenizer(lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , split_by_punct=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Any = DebertaVaTokenizerFast(lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , split_by_punct=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __lowercase ( self : Dict ):
"""simple docstring"""
# fmt: off
SCREAMING_SNAKE_CASE : str = '''I was born in 92000, and this is falsé.'''
SCREAMING_SNAKE_CASE : Union[str, Any] = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
SCREAMING_SNAKE_CASE : List[Any] = DebertaVaTokenizer(lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , split_by_punct=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = DebertaVaTokenizerFast(lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , split_by_punct=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __lowercase ( self : Union[str, Any] ):
"""simple docstring"""
# fmt: off
SCREAMING_SNAKE_CASE : Optional[int] = ''' \tHeLLo!how \n Are yoU? '''
SCREAMING_SNAKE_CASE : Optional[Any] = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?''']
# fmt: on
SCREAMING_SNAKE_CASE : List[str] = DebertaVaTokenizer(lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , split_by_punct=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Any = DebertaVaTokenizerFast(lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , split_by_punct=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )

    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

    def test_sequence_builders(self):
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id],
            encoded_pair,
        )

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
            "input_ids": [
                [1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2],  # noqa: E501
                [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2] + [0] * 55,  # noqa: E501
                [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2] + [0] * 72,
            ],
            "token_type_ids": [[0] * 84, [0] * 84, [0] * 84],
            "attention_mask": [[1] * 84, [1] * 29 + [0] * 55, [1] * 12 + [0] * 72],
        }
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/deberta-v2-xlarge",
            revision="ad6e42c1532ddf3a15c39246b63f5559d558b670",
        )
| 464 | 0 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding


class EsmFoldModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip("Does not support attention outputs")
    def test_attention_outputs(self):
        pass

    @unittest.skip
    def test_correct_missing_keys(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("ESMFold does not support passing input embeds!")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_integration(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_config_init(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_pretrained(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_headmasking(self):
        pass

    @unittest.skip("ESMFold does not output hidden states in the normal way.")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("ESMfold does not output hidden states in the normal way.")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip("ESMFold only has one output format.")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip("ESMFold does not support input chunking.")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.")
    def test_initialization(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_hidden_state(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_simple(self):
        pass

    @unittest.skip("ESMFold doesn't support data parallel.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
| 107 |
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int


def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Post renaming of basic JAX keys to pytorch."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor
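

# Illustrative behaviour of `rename_base_flax_keys` (hypothetical key; the shapes
# are examples only, not taken from a real checkpoint): a 3D expert kernel is
# renamed and its last two axes swapped,
#   ("mlp", "wi", "kernel") with a (num_experts, d_in, d_out) tensor
#   -> ("mlp", "wi", "weight") with a (num_experts, d_out, d_in) tensor
# while a 2D linear kernel is renamed the same way and simply transposed with .T.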


def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content


def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)


def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)

    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            ".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin"
        )
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
    parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )


def sanity_check():
    """Quick manual check that a converted checkpoint generates sensible text."""
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."

    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 449 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class MobileNetV1ImageProcessor(BaseImageProcessor):
    # NOTE: the original class name was garbled in the source; it is restored here
    # as a MobileNet-style processor because the defaults (shortest edge 256,
    # 224x224 center crop, ImageNet-standard mean/std) match that family. The
    # concrete model name is an assumption.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image so that its shortest edge matches `size["shortest_edge"]`."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize an image with the given mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess an image or a batch of images."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
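

# Illustrative usage (hypothetical file path; the class name above is itself a
# reconstruction). With the default 224x224 center crop, an RGB input yields:
#   from PIL import Image
#   processor = MobileNetV1ImageProcessor()
#   batch = processor(images=Image.open("cat.png"), return_tensors="pt")
#   batch["pixel_values"].shape  # torch.Size([1, 3, 224, 224])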
| 716 |
"""simple docstring"""
from typing import Any


def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """
    Viterbi algorithm: computes the most likely sequence of hidden states for a
    hidden Markov model, given a sequence of observations.
    """
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]

    result.reverse()

    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Validates all the arguments of the viterbi function."""
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Raises a ValueError if any argument is empty."""
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)
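

# Illustrative usage on the classic textbook "healthy/sick" HMM (a sketch, not a
# doctest; the values below are the standard example, not taken from this file):
#   observations = ["normal", "cold", "dizzy"]
#   states = ["healthy", "sick"]
#   start_p = {"healthy": 0.6, "sick": 0.4}
#   trans_p = {"healthy": {"healthy": 0.7, "sick": 0.3}, "sick": {"healthy": 0.4, "sick": 0.6}}
#   emit_p = {"healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1}, "sick": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}}
#   viterbi(observations, states, start_p, trans_p, emit_p)  # ["healthy", "healthy", "sick"]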
if __name__ == "__main__":
from doctest import testmod
testmod()
| 117 | 0 |
def solution(limit: int = 1000000) -> int:
    """Returns the starting number below `limit` that produces the longest Collatz
    chain, memoizing chain lengths already computed."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for input1 in range(2, limit):
        counter = 0
        number = input1

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
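
# Note: with the default limit of 1,000,000 the function returns 837799, the
# well-known answer to Project Euler problem 14 (its Collatz chain has 525 terms).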
| 154 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal


logger = logging.get_logger(__name__)


DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)


def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """Interleave several datasets (sources) into a single dataset."""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """Converts a list of datasets with the same schema into a single dataset."""
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to concatenate with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
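

# Illustrative usage of `interleave_datasets` (in-memory sketch; the alternating
# order below assumes no `probabilities` are passed):
#   from datasets import Dataset, interleave_datasets
#   d1 = Dataset.from_dict({"a": [0, 1, 2]})
#   d2 = Dataset.from_dict({"a": [10, 11, 12]})
#   interleave_datasets([d1, d2])["a"]  # [0, 10, 1, 11, 2, 12]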
| 154 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
        "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NllbMoeForConditionalGeneration",
        "NllbMoeModel",
        "NllbMoePreTrainedModel",
        "NllbMoeTop2Router",
        "NllbMoeSparseMLP",
    ]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
            NllbMoeTop2Router,
)
else:
import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 710 |
"""simple docstring"""
def catalan_numbers(upper_limit: int) -> "list[int]":
    """Returns the Catalan numbers C(0), C(1), ..., C(upper_limit) using dynamic programming."""
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list
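

# Example: catalan_numbers(5) -> [1, 1, 2, 5, 14, 42]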
if __name__ == "__main__":
print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
print('\n*** Enter -1 at any time to quit ***')
print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
try:
while True:
            N = int(input().strip())
if N < 0:
print('\n********* Goodbye!! ************')
break
else:
print(f'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print('Try another upper limit for the sequence: ', end='')
except (NameError, ValueError):
print('\n********* Invalid input, goodbye! ************\n')
import doctest
doctest.testmod()
| 118 | 0 |
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 62 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["""MaskFormerFeatureExtractor"""]
_SCREAMING_SNAKE_CASE = ["""MaskFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 163 | 0 |
"""simple docstring"""
import os
import jsonlines
import numpy as np
from tqdm import tqdm


DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
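
# DOC_STRIDE and MAX_LENGTH drive the sliding-window chunking performed below;
# CATEGORY_MAPPING turns the answer categories into the integer labels that
# `save_to_disk` writes out.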


def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer
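

# Illustrative return value of `_get_single_answer` (hypothetical sample):
#   {"id": ..., "category": ["short"], "start_token": [14], "end_token": [16],
#    "start_byte": [...], "end_byte": [...], "text": ["..."], "remove_it": False}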


def get_context_and_ans(example, assertion=False):
    """Gives new context after removing <html> & new answer tokens as per new context"""
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }


def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    """Tokenizes the context into overlapping windows and maps the answer span into each window."""
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break

        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(category),
                "end_token": [-100] * len(category),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]),
            add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }


def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )

    return example


def save_to_disk(hf_data, file_name):
    """Streams the processed samples to a jsonlines file, dropping unusable ones."""
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # drop ~60% of the "null" samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )


if __name__ == "__main__":
    from datasets import load_dataset
    from transformers import BigBirdTokenizer

    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    data = data["train" if PROCESS_TRAIN == "true" else "validation"]

    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
| 598 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)["depth"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline("depth-estimation")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to("cuda")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
... )
>>> pipe = pipe.to("cuda")
>>> img = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/cat.png"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")
>>> prompt = "A robot, 4k photo"
>>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"
>>> generator = torch.Generator(device="cuda").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save("robot_cat.png")
```
'''
def snake_case_ ( A_ : Optional[int], A_ : int, A_ : int=8 ):
'''simple docstring'''
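    # shrink each dimension by scale_factor, rounding up to the next multiple of scale_factor (e.g. 768 -> 96 for scale_factor == 8)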
_lowerCamelCase : Optional[Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
_lowerCamelCase : Dict = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class __snake_case ( _lowercase):
def __init__( self : List[str] , __lowerCAmelCase : UNetaDConditionModel , __lowerCAmelCase : DDPMScheduler , __lowerCAmelCase : VQModel , ):
"""simple docstring"""
super().__init__()
self.register_modules(
unet=__lowerCAmelCase , scheduler=__lowerCAmelCase , movq=__lowerCAmelCase , )
_lowerCamelCase : str = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] ):
"""simple docstring"""
if latents is None:
_lowerCamelCase : Optional[int] = randn_tensor(__lowerCAmelCase , generator=__lowerCAmelCase , device=__lowerCAmelCase , dtype=__lowerCAmelCase )
else:
if latents.shape != shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
_lowerCamelCase : Any = latents.to(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = latents * scheduler.init_noise_sigma
return latents
def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : Any=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
_lowerCamelCase : Tuple = torch.device(f'''cuda:{gpu_id}''' )
_lowerCamelCase : Optional[int] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : Tuple=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
_lowerCamelCase : Optional[int] = torch.device(f'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=__lowerCAmelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_lowerCamelCase : str = None
for cpu_offloaded_model in [self.unet, self.movq]:
_lowerCamelCase , _lowerCamelCase : List[str] = cpu_offload_with_hook(__lowerCAmelCase , __lowerCAmelCase , prev_module_hook=__lowerCAmelCase )
# We'll offload the last model manually.
_lowerCamelCase : Dict = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(__lowerCAmelCase , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__lowerCAmelCase )
def __call__( self : Optional[int] , __lowerCAmelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , __lowerCAmelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : int = 5_1_2 , __lowerCAmelCase : int = 5_1_2 , __lowerCAmelCase : int = 1_0_0 , __lowerCAmelCase : float = 4.0 , __lowerCAmelCase : int = 1 , __lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCAmelCase : Optional[torch.FloatTensor] = None , __lowerCAmelCase : Optional[str] = "pil" , __lowerCAmelCase : bool = True , ):
"""simple docstring"""
_lowerCamelCase : int = self._execution_device
_lowerCamelCase : List[Any] = guidance_scale > 1.0
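        # embeddings and hints may be passed as lists; concatenate them into single batched tensors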
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_lowerCamelCase : List[Any] = torch.cat(__lowerCAmelCase , dim=0 )
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_lowerCamelCase : Dict = torch.cat(__lowerCAmelCase , dim=0 )
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_lowerCamelCase : List[str] = torch.cat(__lowerCAmelCase , dim=0 )
_lowerCamelCase : Union[str, Any] = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
_lowerCamelCase : List[str] = image_embeds.repeat_interleave(__lowerCAmelCase , dim=0 )
_lowerCamelCase : Union[str, Any] = negative_image_embeds.repeat_interleave(__lowerCAmelCase , dim=0 )
_lowerCamelCase : int = hint.repeat_interleave(__lowerCAmelCase , dim=0 )
_lowerCamelCase : Dict = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__lowerCAmelCase )
_lowerCamelCase : List[str] = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=__lowerCAmelCase )
self.scheduler.set_timesteps(__lowerCAmelCase , device=__lowerCAmelCase )
_lowerCamelCase : List[Any] = self.scheduler.timesteps
_lowerCamelCase : Tuple = self.movq.config.latent_channels
_lowerCamelCase , _lowerCamelCase : List[Any] = downscale_height_and_width(__lowerCAmelCase , __lowerCAmelCase , self.movq_scale_factor )
# create initial latent
_lowerCamelCase : Optional[int] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(__lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
_lowerCamelCase : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCamelCase : int = {'''image_embeds''': image_embeds, '''hint''': hint}
_lowerCamelCase : List[str] = self.unet(
sample=__lowerCAmelCase , timestep=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , added_cond_kwargs=__lowerCAmelCase , return_dict=__lowerCAmelCase , )[0]
if do_classifier_free_guidance:
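                # the unet stacks predicted noise and variance on the channel dim: split them, apply guidance to the noise, and keep the conditional variance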
_lowerCamelCase , _lowerCamelCase : str = noise_pred.split(latents.shape[1] , dim=1 )
_lowerCamelCase , _lowerCamelCase : Optional[Any] = noise_pred.chunk(2 )
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = variance_pred.chunk(2 )
_lowerCamelCase : int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_lowerCamelCase : int = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_lowerCamelCase , _lowerCamelCase : Any = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCamelCase : Any = self.scheduler.step(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase , )[0]
# post-processing
_lowerCamelCase : Optional[int] = self.movq.decode(__lowerCAmelCase , force_not_quantize=__lowerCAmelCase )['''sample''']
if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}''' )
if output_type in ["np", "pil"]:
_lowerCamelCase : Union[str, Any] = image * 0.5 + 0.5
_lowerCamelCase : List[Any] = image.clamp(0 , 1 )
_lowerCamelCase : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_lowerCamelCase : Union[str, Any] = self.numpy_to_pil(__lowerCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCAmelCase )
| 598 | 1 |
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCamelCase ( self ):
lowercase : int = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
lowercase : Optional[int] = Vector()
def __lowerCamelCase ( self ):
lowercase : List[Any] = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(SCREAMING_SNAKE_CASE__ ) , '''(0,0,0,0,0,1)''' )
def __lowerCamelCase ( self ):
lowercase : List[Any] = Vector([1, 2, 3, 4] )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 4 )
def __lowerCamelCase ( self ):
lowercase : Optional[Any] = Vector([1, 2] )
lowercase : Optional[int] = Vector([1, 2, 3, 4, 5] )
lowercase : Optional[Any] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
lowercase : Any = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 )
def __lowerCamelCase ( self ):
lowercase : str = Vector([1, 2, 3] )
lowercase : int = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def __lowerCamelCase ( self ):
lowercase : List[str] = Vector([1, 2, 3] )
lowercase : List[Any] = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def __lowerCamelCase ( self ):
lowercase : Any = Vector([1, 2, 3] )
lowercase : List[str] = Vector([2, -1, 4] ) # for test of dot product
lowercase : int = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , '''(3.0,6.0,9.0)''' )
self.assertEqual((a * b) , 0 )
def __lowerCamelCase ( self ):
self.assertEqual(str(zero_vector(10 ) ).count('''0''' ) , 10 )
def __lowerCamelCase ( self ):
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '''(0,1,0)''' )
def __lowerCamelCase ( self ):
lowercase : Any = Vector([1, 2, 3] )
lowercase : Tuple = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) , '''(3,4,7)''' )
def __lowerCamelCase ( self ):
lowercase : Optional[Any] = Vector([1, 0, 0, 0, 0, 0] )
lowercase : List[str] = x.copy()
self.assertEqual(str(SCREAMING_SNAKE_CASE__ ) , str(SCREAMING_SNAKE_CASE__ ) )
def __lowerCamelCase ( self ):
lowercase : Union[str, Any] = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(SCREAMING_SNAKE_CASE__ ) , '''(0,1,0)''' )
def __lowerCamelCase ( self ):
lowercase : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual('''|1,2,3|\n|2,4,5|\n|6,7,8|\n''' , str(SCREAMING_SNAKE_CASE__ ) )
def __lowerCamelCase ( self ):
lowercase : Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowercase : Optional[int] = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
def __lowerCamelCase ( self ):
lowercase : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowercase : List[Any] = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
def __lowerCamelCase ( self ):
lowercase : Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def __lowerCamelCase ( self ):
lowercase : int = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
lowercase : Any = Vector([1, 2, 3] )
self.assertEqual('''(14,32,50)''' , str(a * x ) )
self.assertEqual('''|2,4,6|\n|8,10,12|\n|14,16,18|\n''' , str(a * 2 ) )
def __lowerCamelCase ( self ):
lowercase : List[str] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual('''|1,2,5|\n|2,4,5|\n|6,7,8|\n''' , str(SCREAMING_SNAKE_CASE__ ) )
def __lowerCamelCase ( self ):
lowercase : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertAlmostEqual(7 , a.component(2 , 1 ) , delta=0.01 )
def __lowerCamelCase ( self ):
lowercase : Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowercase : str = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('''|2,4,10|\n|4,8,10|\n|12,14,18|\n''' , str(a + b ) )
def __lowerCamelCase ( self ):
lowercase : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowercase : int = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('''|0,0,-4|\n|0,0,0|\n|0,0,-2|\n''' , str(a - b ) )
def __lowerCamelCase ( self ):
self.assertEqual(
'''|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n''' , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
| 319 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class __SCREAMING_SNAKE_CASE :
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=14 , SCREAMING_SNAKE_CASE__=7 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=99 , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=37 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=512 , SCREAMING_SNAKE_CASE__=0.02 , ):
lowercase : Any = parent
lowercase : Any = batch_size
lowercase : Union[str, Any] = seq_length
lowercase : Dict = is_training
lowercase : List[Any] = use_input_mask
lowercase : Optional[int] = use_token_type_ids
lowercase : Union[str, Any] = use_labels
lowercase : Any = vocab_size
lowercase : List[Any] = hidden_size
lowercase : str = rotary_dim
lowercase : Tuple = num_hidden_layers
lowercase : Dict = num_attention_heads
lowercase : Optional[int] = intermediate_size
lowercase : Optional[int] = hidden_act
lowercase : List[str] = hidden_dropout_prob
lowercase : Any = attention_probs_dropout_prob
lowercase : Union[str, Any] = max_position_embeddings
lowercase : List[Any] = initializer_range
lowercase : str = None
lowercase : Dict = vocab_size - 1
lowercase : List[Any] = vocab_size - 1
lowercase : Optional[Any] = vocab_size - 1
def __lowerCamelCase ( self ):
lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : List[Any] = None
if self.use_input_mask:
lowercase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Union[str, Any] = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=SCREAMING_SNAKE_CASE__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def __lowerCamelCase ( self ):
lowercase : str = self.prepare_config_and_inputs()
lowercase , lowercase , lowercase : int = config_and_inputs
lowercase : List[str] = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : Union[str, Any] = 20
lowercase : Optional[int] = model_class_name(SCREAMING_SNAKE_CASE__ )
lowercase : int = model.init_cache(input_ids.shape[0] , SCREAMING_SNAKE_CASE__ )
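        # prefill the cache on all tokens but the last, then decode the final token from past_key_values and compare with a full forward pass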
lowercase : Union[str, Any] = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
lowercase : Union[str, Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
lowercase : str = model(
input_ids[:, :-1] , attention_mask=SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ , position_ids=SCREAMING_SNAKE_CASE__ , )
lowercase : Dict = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowercase : int = model(
input_ids[:, -1:] , attention_mask=SCREAMING_SNAKE_CASE__ , past_key_values=outputs_cache.past_key_values , position_ids=SCREAMING_SNAKE_CASE__ , )
lowercase : Tuple = model(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : str = 20
lowercase : List[Any] = model_class_name(SCREAMING_SNAKE_CASE__ )
lowercase : Any = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
lowercase : Optional[Any] = model.init_cache(input_ids.shape[0] , SCREAMING_SNAKE_CASE__ )
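        # same prefill-then-decode check, now with the attention mask zero-padded out to max_decoder_length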
lowercase : int = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
lowercase : List[Any] = model(
input_ids[:, :-1] , attention_mask=SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ , position_ids=SCREAMING_SNAKE_CASE__ , )
lowercase : Optional[int] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowercase : str = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=SCREAMING_SNAKE_CASE__ , position_ids=SCREAMING_SNAKE_CASE__ , )
lowercase : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class __SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ):
A : Optional[Any] = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
A : Union[str, Any] = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def __lowerCamelCase ( self ):
lowercase : List[Any] = FlaxGPTJModelTester(self )
def __lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
lowercase , lowercase , lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
lowercase , lowercase , lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@tooslow
def __lowerCamelCase ( self ):
lowercase : str = GPTaTokenizer.from_pretrained('''gpt2''' , pad_token='''<|endoftext|>''' , padding_side='''left''' )
lowercase : int = tokenizer(['''Hello this is a long string''', '''Hey'''] , return_tensors='''np''' , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = FlaxGPTJForCausalLM.from_pretrained('''EleutherAI/gpt-j-6B''' )
lowercase : Tuple = False
lowercase : List[Any] = model.config.eos_token_id
lowercase : Optional[int] = jax.jit(model.generate )
lowercase : Optional[Any] = jit_generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , pad_token_id=tokenizer.pad_token_id ).sequences
lowercase : Union[str, Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
lowercase : Dict = [
'''Hello this is a long string of text.\n\nI\'m trying to get the text of the''',
'''Hey, I\'m a little late to the party. I\'m going to''',
]
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@is_pt_flax_cross_test
def __lowerCamelCase ( self ):
lowercase , lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
lowercase : Union[str, Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowercase : List[Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowercase : List[str] = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase , lowercase : Union[str, Any] = pt_inputs['''input_ids'''].shape
lowercase : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE__ ):
lowercase : int = 0
lowercase : List[str] = 1
lowercase : int = 0
lowercase : Optional[Any] = 1
lowercase : Dict = pt_model_class(SCREAMING_SNAKE_CASE__ ).eval()
lowercase : int = model_class(SCREAMING_SNAKE_CASE__ , dtype=jnp.floataa )
lowercase : List[str] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = fx_state
with torch.no_grad():
lowercase : Optional[int] = pt_model(**SCREAMING_SNAKE_CASE__ ).to_tuple()
lowercase : Union[str, Any] = fx_model(**SCREAMING_SNAKE_CASE__ ).to_tuple()
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = model_class.from_pretrained(SCREAMING_SNAKE_CASE__ , from_pt=SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = fx_model_loaded(**SCREAMING_SNAKE_CASE__ ).to_tuple()
self.assertEqual(
len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def __lowerCamelCase ( self ):
lowercase , lowercase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
lowercase : Optional[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowercase : Any = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowercase : Any = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Dict = pt_model_class(SCREAMING_SNAKE_CASE__ ).eval()
lowercase : Tuple = model_class(SCREAMING_SNAKE_CASE__ , dtype=jnp.floataa )
lowercase : List[Any] = load_flax_weights_in_pytorch_model(SCREAMING_SNAKE_CASE__ , fx_model.params )
lowercase , lowercase : Optional[int] = pt_inputs['''input_ids'''].shape
lowercase : str = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE__ ):
lowercase : Dict = 0
lowercase : List[Any] = 1
lowercase : str = 0
lowercase : Tuple = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
lowercase : List[Any] = pt_model(**SCREAMING_SNAKE_CASE__ ).to_tuple()
lowercase : Optional[Any] = fx_model(**SCREAMING_SNAKE_CASE__ ).to_tuple()
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(SCREAMING_SNAKE_CASE__ )
lowercase : int = pt_model_class.from_pretrained(SCREAMING_SNAKE_CASE__ , from_flax=SCREAMING_SNAKE_CASE__ )
with torch.no_grad():
lowercase : Dict = pt_model_loaded(**SCREAMING_SNAKE_CASE__ ).to_tuple()
self.assertEqual(
len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def __lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
lowercase : Dict = model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''' )
lowercase : str = model(np.ones((1, 1) ) )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
| 319 | 1 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = 0
if start < end:
SCREAMING_SNAKE_CASE_ : str = randint(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Any = a[end]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = a[pivot]
SCREAMING_SNAKE_CASE_ : List[str] = temp
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = _in_place_partition(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
count += _in_place_quick_sort(lowerCAmelCase__ , lowerCAmelCase__ , p - 1 )
count += _in_place_quick_sort(lowerCAmelCase__ , p + 1 , lowerCAmelCase__ )
return count
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = 0
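    # Lomuto-style partition: pick a random pivot and swap it to the end of the range first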
SCREAMING_SNAKE_CASE_ : int = randint(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Dict = a[end]
SCREAMING_SNAKE_CASE_ : int = a[pivot]
SCREAMING_SNAKE_CASE_ : int = temp
SCREAMING_SNAKE_CASE_ : List[str] = start - 1
for index in range(lowerCAmelCase__ , lowerCAmelCase__ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
SCREAMING_SNAKE_CASE_ : Any = new_pivot_index + 1
SCREAMING_SNAKE_CASE_ : Dict = a[new_pivot_index]
SCREAMING_SNAKE_CASE_ : List[str] = a[index]
SCREAMING_SNAKE_CASE_ : Tuple = temp
SCREAMING_SNAKE_CASE_ : Dict = a[new_pivot_index + 1]
SCREAMING_SNAKE_CASE_ : Any = a[end]
SCREAMING_SNAKE_CASE_ : Optional[Any] = temp
return new_pivot_index + 1, count
__lowerCamelCase : List[str] = TemporaryFile()
__lowerCamelCase : Any = 1_00 # 100 elements are to be sorted
__lowerCamelCase , __lowerCamelCase : int = 0, 1 # mean and standard deviation
__lowerCamelCase : List[str] = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)
outfile.seek(0) # using the same array
__lowerCamelCase : str = np.load(outfile)
__lowerCamelCase : Tuple = len(M) - 1
__lowerCamelCase : Optional[int] = _in_place_quick_sort(M, 0, r)
print(
'''No of Comparisons for 100 elements selected from a standard normal distribution'''
    ''' is :'''
)
print(z)
| 714 | import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__lowerCamelCase : Optional[int] = '''pt'''
elif is_tf_available():
__lowerCamelCase : str = '''tf'''
else:
__lowerCamelCase : int = '''jax'''
class a__ ( A__ , unittest.TestCase ):
A = PerceiverTokenizer
A = False
def __UpperCamelCase ( self : str ):
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE_ : List[Any] = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver" )
def __UpperCamelCase ( self : Optional[int],**_A : List[Any] ):
"""simple docstring"""
return self.tokenizer_class.from_pretrained(self.tmpdirname,**_A )
def __UpperCamelCase ( self : List[Any],_A : Optional[Any],_A : str=False,_A : Tuple=20,_A : Tuple=5 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = []
for i in range(len(_A ) ):
try:
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer.decode([i],clean_up_tokenization_spaces=_A )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
SCREAMING_SNAKE_CASE_ : int = list(filter(lambda _A : re.match(R"^[ a-zA-Z]+$",t[1] ),_A ) )
SCREAMING_SNAKE_CASE_ : Dict = list(filter(lambda _A : [t[0]] == tokenizer.encode(t[1],add_special_tokens=_A ),_A ) )
if max_length is not None and len(_A ) > max_length:
SCREAMING_SNAKE_CASE_ : List[str] = toks[:max_length]
if min_length is not None and len(_A ) < min_length and len(_A ) > 0:
while len(_A ) < min_length:
SCREAMING_SNAKE_CASE_ : int = toks + toks
# toks_str = [t[1] for t in toks]
SCREAMING_SNAKE_CASE_ : List[Any] = [t[0] for t in toks]
# Ensure consistency
SCREAMING_SNAKE_CASE_ : str = tokenizer.decode(_A,clean_up_tokenization_spaces=_A )
if " " not in output_txt and len(_A ) > 1:
SCREAMING_SNAKE_CASE_ : Dict = (
tokenizer.decode([toks_ids[0]],clean_up_tokenization_spaces=_A )
+ " "
+ tokenizer.decode(toks_ids[1:],clean_up_tokenization_spaces=_A )
)
if with_prefix_space:
SCREAMING_SNAKE_CASE_ : str = " " + output_txt
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.encode(_A,add_special_tokens=_A )
return output_txt, output_ids
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = self.perceiver_tokenizer
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "Unicode €."
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer(_A )
SCREAMING_SNAKE_CASE_ : int = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded["input_ids"],_A )
# decoding
SCREAMING_SNAKE_CASE_ : str = tokenizer.decode(_A )
self.assertEqual(_A,"[CLS]Unicode €.[SEP]" )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer("e è é ê ë" )
SCREAMING_SNAKE_CASE_ : Tuple = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded["input_ids"],_A )
# decoding
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer.decode(_A )
self.assertEqual(_A,"[CLS]e è é ê ë[SEP]" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ),"[CLS]e è é ê ë[SEP]" )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.perceiver_tokenizer
SCREAMING_SNAKE_CASE_ : List[str] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
# fmt: off
SCREAMING_SNAKE_CASE_ : Optional[Any] = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
SCREAMING_SNAKE_CASE_ : str = tokenizer(_A,padding=_A,return_tensors=_A )
self.assertIsInstance(_A,_A )
if FRAMEWORK != "jax":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = list(batch.input_ids.numpy()[0] )
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_A,_A )
self.assertEqual((2, 38),batch.input_ids.shape )
self.assertEqual((2, 38),batch.attention_mask.shape )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.perceiver_tokenizer
SCREAMING_SNAKE_CASE_ : Any = ["A long paragraph for summarization.", "Another paragraph for summarization."]
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer(_A,padding=_A,return_tensors=_A )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("input_ids",_A )
self.assertIn("attention_mask",_A )
self.assertNotIn("decoder_input_ids",_A )
self.assertNotIn("decoder_attention_mask",_A )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.perceiver_tokenizer
SCREAMING_SNAKE_CASE_ : int = [
"Summary of the text.",
"Another summary.",
]
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer(
text_target=_A,max_length=32,padding="max_length",truncation=_A,return_tensors=_A )
self.assertEqual(32,targets["input_ids"].shape[1] )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length,42 )
# Now let's start the test
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE_ : Tuple = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ : str = " He is very happy, UNwant\u00E9d,running"
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer.encode(_A,add_special_tokens=_A )
tokenizer.save_pretrained(_A )
SCREAMING_SNAKE_CASE_ : int = tokenizer.__class__.from_pretrained(_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = after_tokenizer.encode(_A,add_special_tokens=_A )
self.assertListEqual(_A,_A )
shutil.rmtree(_A )
SCREAMING_SNAKE_CASE_ : Tuple = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE_ : Optional[Any] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ : Tuple = " He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(["bim", "bambam"] )
SCREAMING_SNAKE_CASE_ : int = tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token" )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
SCREAMING_SNAKE_CASE_ : str = tokenizer.encode(_A,add_special_tokens=_A )
tokenizer.save_pretrained(_A )
SCREAMING_SNAKE_CASE_ : str = tokenizer.__class__.from_pretrained(_A )
SCREAMING_SNAKE_CASE_ : str = after_tokenizer.encode(_A,add_special_tokens=_A )
self.assertListEqual(_A,_A )
self.assertIn("new_additional_special_token",after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length,42 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.__class__.from_pretrained(_A,model_max_length=43 )
self.assertEqual(tokenizer.model_max_length,43 )
shutil.rmtree(_A )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_A )
with open(os.path.join(_A,"special_tokens_map.json" ),encoding="utf-8" ) as json_file:
SCREAMING_SNAKE_CASE_ : Optional[int] = json.load(_A )
with open(os.path.join(_A,"tokenizer_config.json" ),encoding="utf-8" ) as json_file:
SCREAMING_SNAKE_CASE_ : int = json.load(_A )
SCREAMING_SNAKE_CASE_ : Any = [F'<extra_id_{i}>' for i in range(125 )]
SCREAMING_SNAKE_CASE_ : List[Any] = added_tokens_extra_ids + [
"an_additional_special_token"
]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = added_tokens_extra_ids + [
"an_additional_special_token"
]
with open(os.path.join(_A,"special_tokens_map.json" ),"w",encoding="utf-8" ) as outfile:
json.dump(_A,_A )
with open(os.path.join(_A,"tokenizer_config.json" ),"w",encoding="utf-8" ) as outfile:
json.dump(_A,_A )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
SCREAMING_SNAKE_CASE_ : Dict = tokenizer_class.from_pretrained(
_A,)
self.assertIn(
"an_additional_special_token",tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
["an_additional_special_token"],tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ),)
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
SCREAMING_SNAKE_CASE_ : Union[str, Any] = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token",lstrip=_A )]
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer_class.from_pretrained(
_A,additional_special_tokens=_A,)
self.assertIn("a_new_additional_special_token",tokenizer.additional_special_tokens )
self.assertEqual(
["a_new_additional_special_token"],tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ),)
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ),"�" )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
pass
def __UpperCamelCase ( self : int ):
"""simple docstring"""
pass
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
pass
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
pass
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.get_tokenizers(fast=_A,do_lower_case=_A )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
SCREAMING_SNAKE_CASE_ : str = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.convert_tokens_to_string(_A )
self.assertIsInstance(_A,_A )
| 316 | 0 |
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
lowerCAmelCase__ :List[str] = logging.getLogger(__name__)
def lowerCAmelCase__ ( a__: str ) -> List[str]:
'''simple docstring'''
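    # record the repo id, commit SHA and active branch so the run can be traced back to its code state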
_UpperCAmelCase = git.Repo(search_parent_directories=a__ )
_UpperCAmelCase = {
'repo_id': str(a__ ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
}
with open(os.path.join(a__ , 'git_log.json' ) , 'w' ) as f:
json.dump(a__ , a__ , indent=4 )
def lowerCAmelCase__ ( a__: int ) -> Union[str, Any]:
'''simple docstring'''
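    # derive single-/multi-GPU and multi-node topology from the environment and initialize torch.distributed when needed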
if params.n_gpu <= 0:
_UpperCAmelCase = 0
_UpperCAmelCase = -1
_UpperCAmelCase = True
_UpperCAmelCase = False
return
assert torch.cuda.is_available()
logger.info('Initializing GPUs' )
if params.n_gpu > 1:
assert params.local_rank != -1
_UpperCAmelCase = int(os.environ['WORLD_SIZE'] )
_UpperCAmelCase = int(os.environ['N_GPU_NODE'] )
_UpperCAmelCase = int(os.environ['RANK'] )
# number of nodes / node ID
_UpperCAmelCase = params.world_size // params.n_gpu_per_node
_UpperCAmelCase = params.global_rank // params.n_gpu_per_node
_UpperCAmelCase = True
assert params.n_nodes == int(os.environ['N_NODES'] )
assert params.node_id == int(os.environ['NODE_RANK'] )
# local job (single GPU)
else:
assert params.local_rank == -1
_UpperCAmelCase = 1
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = 1
_UpperCAmelCase = 1
_UpperCAmelCase = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
_UpperCAmelCase = params.node_id == 0 and params.local_rank == 0
_UpperCAmelCase = params.n_nodes > 1
# summary
_UpperCAmelCase = F'''--- Global rank: {params.global_rank} - '''
logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes )
logger.info(PREFIX + 'Node ID : %i' % params.node_id )
logger.info(PREFIX + 'Local rank : %i' % params.local_rank )
logger.info(PREFIX + 'World size : %i' % params.world_size )
logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node )
logger.info(PREFIX + 'Master : %s' % str(params.is_master ) )
logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node ) )
logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu ) )
logger.info(PREFIX + 'Hostname : %s' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('Initializing PyTorch distributed' )
torch.distributed.init_process_group(
init_method='env://' , backend='nccl' , )
def lowerCAmelCase__ ( a__: int ) -> Dict:
'''simple docstring'''
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 618 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowerCAmelCase__ :int = logging.get_logger(__name__)
def lowerCAmelCase__ ( a__: Dict ) -> List[str]:
'''simple docstring'''
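    # rewrite indexed PyTorch module names like "layers.0" into Flax-style "layers_0"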
_UpperCAmelCase = R'\w+[.]\d+'
_UpperCAmelCase = re.findall(a__ , a__ )
for pat in pats:
_UpperCAmelCase = key.replace(a__ , '_'.join(pat.split('.' ) ) )
return key
def lowerCAmelCase__ ( a__: Optional[int] , a__: Dict , a__: List[Any] ) -> str:
'''simple docstring'''
_UpperCAmelCase = pt_tuple_key[:-1] + ('scale',)
if (
any('norm' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
_UpperCAmelCase = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
_UpperCAmelCase = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
_UpperCAmelCase = pt_tuple_key[:-1] + ('embedding',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
_UpperCAmelCase = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
_UpperCAmelCase = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
_UpperCAmelCase = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight":
_UpperCAmelCase = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
_UpperCAmelCase = pt_tuple_key[:-1] + ('weight',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
_UpperCAmelCase = pt_tuple_key[:-1] + ('bias',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def lowerCAmelCase__ ( a__: Any , a__: Optional[Any] , a__: int=4_2 ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
_UpperCAmelCase = flax_model.init_weights(PRNGKey(a__ ) )
_UpperCAmelCase = flatten_dict(a__ )
_UpperCAmelCase = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
_UpperCAmelCase = rename_key(a__ )
_UpperCAmelCase = tuple(renamed_pt_key.split('.' ) )
# Correctly rename weight parameters
_UpperCAmelCase , _UpperCAmelCase = rename_key_and_reshape_tensor(a__ , a__ , a__ )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# also add unexpected weight so that warning is thrown
_UpperCAmelCase = jnp.asarray(a__ )
return unflatten_dict(a__ )
| 618 | 1 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
_lowerCamelCase : List[str] = random.Random()
if is_torch_available():
import torch
def __lowerCamelCase (UpperCAmelCase__ : Dict , UpperCAmelCase__ : str=1.0 , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : Tuple=None ):
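    # produce a shape[0] x shape[1] nested list of random floats in [0, scale), using the global RNG unless one is passed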
if rng is None:
SCREAMING_SNAKE_CASE = global_rng
SCREAMING_SNAKE_CASE = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class lowercase ( unittest.TestCase ):
def __init__( self : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : int=7 , _UpperCamelCase : Optional[Any]=400 , _UpperCamelCase : List[str]=2_000 , _UpperCamelCase : List[str]=1 , _UpperCamelCase : List[str]=0.0 , _UpperCamelCase : Optional[Any]=16_000 , _UpperCamelCase : str=True , _UpperCamelCase : int=True , ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = min_seq_length
SCREAMING_SNAKE_CASE = max_seq_length
SCREAMING_SNAKE_CASE = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE = feature_size
SCREAMING_SNAKE_CASE = padding_value
SCREAMING_SNAKE_CASE = sampling_rate
SCREAMING_SNAKE_CASE = return_attention_mask
SCREAMING_SNAKE_CASE = do_normalize
def __snake_case( self : Dict ) -> str:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __snake_case( self : int , _UpperCamelCase : Union[str, Any]=False , _UpperCamelCase : Dict=False ) -> Tuple:
'''simple docstring'''
def _flatten(_UpperCamelCase : int ):
return list(itertools.chain(*_UpperCamelCase ) )
if equal_length:
SCREAMING_SNAKE_CASE = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE = [np.asarray(_UpperCamelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowercase ( a , unittest.TestCase ):
lowercase__ : List[Any] = ASTFeatureExtractor
def __snake_case( self : Dict ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ASTFeatureExtractionTester(self )
def __snake_case( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE = [np.asarray(_UpperCamelCase ) for speech_input in speech_inputs]
# Test not batched input
SCREAMING_SNAKE_CASE = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
SCREAMING_SNAKE_CASE = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) )
# Test batched
SCREAMING_SNAKE_CASE = feat_extract(_UpperCamelCase , padding=_UpperCamelCase , return_tensors="np" ).input_values
SCREAMING_SNAKE_CASE = feat_extract(_UpperCamelCase , padding=_UpperCamelCase , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(_UpperCamelCase , _UpperCamelCase ):
self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE = np.asarray(_UpperCamelCase )
SCREAMING_SNAKE_CASE = feat_extract(_UpperCamelCase , return_tensors="np" ).input_values
SCREAMING_SNAKE_CASE = feat_extract(_UpperCamelCase , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(_UpperCamelCase , _UpperCamelCase ):
self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) )
@require_torch
def __snake_case( self : str ) -> Dict:
'''simple docstring'''
import torch
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE = np.random.rand(100 ).astype(np.floataa )
SCREAMING_SNAKE_CASE = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
SCREAMING_SNAKE_CASE = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
SCREAMING_SNAKE_CASE = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def __snake_case( self : Optional[int] , _UpperCamelCase : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
from datasets import load_dataset
SCREAMING_SNAKE_CASE = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE = ds.sort("id" ).select(range(_UpperCamelCase ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
@require_torch
def __snake_case( self : List[str] ) -> Optional[int]:
'''simple docstring'''
        # fmt: off
        SCREAMING_SNAKE_CASE = torch.tensor(
[-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6,
-1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3,
-1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6,
-0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9] )
# fmt: on
SCREAMING_SNAKE_CASE = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE = ASTFeatureExtractor()
SCREAMING_SNAKE_CASE = feature_extractor(_UpperCamelCase , return_tensors="pt" ).input_values
        self.assertEqual(input_values.shape , (1, 1_024, 128) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , _UpperCamelCase , atol=1e-4 ) )
| 647 | import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self : Optional[int] , _UpperCamelCase : Any , _UpperCamelCase : Dict=7 , _UpperCamelCase : Union[str, Any]=3 , _UpperCamelCase : Optional[int]=30 , _UpperCamelCase : List[Any]=400 , _UpperCamelCase : Dict=True , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : Any=True , _UpperCamelCase : List[Any]=[0.5, 0.5, 0.5] , _UpperCamelCase : Tuple=[0.5, 0.5, 0.5] , _UpperCamelCase : Tuple=True , _UpperCamelCase : List[Any]=1 / 255 , _UpperCamelCase : Optional[Any]=True , ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = min_resolution
SCREAMING_SNAKE_CASE = max_resolution
SCREAMING_SNAKE_CASE = do_resize
SCREAMING_SNAKE_CASE = size
SCREAMING_SNAKE_CASE = do_normalize
SCREAMING_SNAKE_CASE = image_mean
SCREAMING_SNAKE_CASE = image_std
SCREAMING_SNAKE_CASE = do_rescale
SCREAMING_SNAKE_CASE = rescale_factor
SCREAMING_SNAKE_CASE = do_pad
def __snake_case( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __snake_case( self : Any , _UpperCamelCase : List[str] , _UpperCamelCase : List[Any]=False ) -> List[Any]:
'''simple docstring'''
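        # mirror the processor's shortest-edge resizing to compute the output (height, width) it should produce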
if not batched:
SCREAMING_SNAKE_CASE = image_inputs[0]
if isinstance(_UpperCamelCase , Image.Image ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = image.size
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = image.shape[1], image.shape[2]
if w < h:
SCREAMING_SNAKE_CASE = int(self.size["shortest_edge"] * h / w )
SCREAMING_SNAKE_CASE = self.size["shortest_edge"]
elif w > h:
SCREAMING_SNAKE_CASE = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE = int(self.size["shortest_edge"] * w / h )
else:
SCREAMING_SNAKE_CASE = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE = self.size["shortest_edge"]
else:
SCREAMING_SNAKE_CASE = []
for image in image_inputs:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE = max(_UpperCamelCase , key=lambda _UpperCamelCase : item[0] )[0]
SCREAMING_SNAKE_CASE = max(_UpperCamelCase , key=lambda _UpperCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowercase ( a , unittest.TestCase ):
lowercase__ : Optional[int] = DetaImageProcessor if is_vision_available() else None
def __snake_case( self : List[str] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = DetaImageProcessingTester(self )
@property
def __snake_case( self : int ) -> Tuple:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __snake_case( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCamelCase , "image_mean" ) )
self.assertTrue(hasattr(_UpperCamelCase , "image_std" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_resize" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(_UpperCamelCase , "do_pad" ) )
self.assertTrue(hasattr(_UpperCamelCase , "size" ) )
def __snake_case( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1_333} )
self.assertEqual(image_processor.do_pad , _UpperCamelCase )
def __snake_case( self : str ) -> List[Any]:
'''simple docstring'''
pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
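# Usage sketch (illustrative, not part of the test suite): `img` stands in for any RGB
# PIL image. By default DETA resizes the shortest edge to 800 (capped at 1333 on the
# longest edge), normalizes, and pads:
#
#   processor = DetaImageProcessor()
#   inputs = processor(images=img, return_tensors="pt")
#   inputs["pixel_values"].shape  # e.g. torch.Size([1, 3, 800, 1066])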
| 647 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
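# Usage sketch (illustrative): a smaller two-stage variant, showing how `stage_names`
# and the aligned backbone outputs are derived from `depths`:
#
#   config = FocalNetConfig(depths=[2, 2], out_features=["stage1"])
#   config.stage_names   # ["stem", "stage1", "stage2"]
#   config.out_features  # ["stage1"]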
| 380 |
def bin_to_octal(bin_string: str) -> str:
    """Convert a binary string to its octal representation.

    >>> bin_to_octal("1111")
    '17'
    >>> bin_to_octal("101010101")
    '525'
    """
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    # Left-pad with zeros so the length is a multiple of 3 (one octal digit per 3 bits).
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
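    # A couple of direct spot checks on top of the doctests (values verified by hand:
    # 0b1111 == 0o17 and 0b101010101 == 0o525).
    assert bin_to_octal("1111") == "17"
    assert bin_to_octal("101010101") == "525"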
| 380 | 1 |
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
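# Wiring sketch (illustrative; every name below is a placeholder for objects built by the
# surrounding training script). Generation kwargs such as max_length/num_beams are forwarded
# through `evaluate`, and `post_process_function` maps generated ids back to text answers:
#
#   trainer = QuestionAnsweringSeq2SeqTrainer(
#       model=model,
#       args=training_args,
#       train_dataset=train_dataset,
#       eval_dataset=eval_dataset,
#       eval_examples=eval_examples,
#       post_process_function=post_processing_function,
#       compute_metrics=compute_metrics,
#   )
#   metrics = trainer.evaluate(max_length=30, num_beams=4)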
| 709 |
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
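# Usage sketch (illustrative): a single call tokenizes the text and preprocesses the image,
# yielding one BatchEncoding with input_ids, pixel_values and pixel_mask.
#
#   from transformers import BridgeTowerProcessor
#   processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#   encoding = processor(images=pil_image, text="a photo of two cats", return_tensors="pt")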
| 239 | 0 |
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def __snake_case ( self :List[str] ) ->List[str]:
pass
def __snake_case ( self :Dict ) ->Union[str, Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __snake_case ( self :List[str] ) ->Union[str, Any]:
return
def __snake_case ( self :Any ) ->int:
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def __snake_case ( self :int ) ->Dict:
lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*snake_case__ )
@unittest.skip("""Swin does not use inputs_embeds""" )
def __snake_case ( self :Dict ) ->Tuple:
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def __snake_case ( self :List[Any] ) ->Any:
pass
def __snake_case ( self :List[str] ) ->Any:
lowercase , lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : Any = model_class(snake_case__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) )
def __snake_case ( self :Optional[Any] ) ->List[Any]:
lowercase , lowercase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : Optional[Any] = model_class(snake_case__ )
lowercase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase : Optional[int] = [*signature.parameters.keys()]
lowercase : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case__ )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def __snake_case ( self :str ) ->str:
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def __snake_case ( self :Dict ) ->Optional[int]:
pass
def __snake_case ( self :Optional[Any] , __magic_name__ :Dict , __magic_name__ :List[str] , __magic_name__ :str , __magic_name__ :Optional[Any] ) ->Dict:
lowercase : Any = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
lowercase : List[Any] = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
lowercase : Union[str, Any] = outputs.hidden_states
lowercase : Tuple = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(snake_case__ ) , snake_case__ )
# Swin has a different seq_length
lowercase : Any = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __snake_case ( self :List[str] ) ->Union[str, Any]:
lowercase , lowercase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowercase : List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowercase : Tuple = True
self.check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase : List[str] = True
self.check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
def __snake_case ( self :List[Any] ) ->Dict:
lowercase , lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase : Optional[int] = 3
lowercase : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase : int = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase : List[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowercase : Tuple = True
self.check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase : int = True
self.check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ , (padded_height, padded_width) )
    @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_to_base(self):
        pass
def __snake_case ( self :List[Any] ) ->Any:
lowercase , lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(__magic_name__ :List[str] ):
lowercase : List[str] = 0
return t
def check_equivalence(__magic_name__ :Any , __magic_name__ :Optional[int] , __magic_name__ :int , __magic_name__ :Dict={} ):
with torch.no_grad():
lowercase : Tuple = model(**snake_case__ , return_dict=snake_case__ , **snake_case__ )
lowercase : str = model(**snake_case__ , return_dict=snake_case__ , **snake_case__ ).to_tuple()
def recursive_check(__magic_name__ :List[str] , __magic_name__ :Optional[Any] ):
if isinstance(snake_case__ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(snake_case__ , snake_case__ ):
recursive_check(snake_case__ , snake_case__ )
elif isinstance(snake_case__ , snake_case__ ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(snake_case__ , snake_case__ )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(snake_case__ ) , set_nan_tensor_to_zero(snake_case__ ) , atol=1E-5 ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
f""" {torch.isnan(snake_case__ ).any()} and `inf`: {torch.isinf(snake_case__ )}. Dict has"""
f""" `nan`: {torch.isnan(snake_case__ ).any()} and `inf`: {torch.isinf(snake_case__ )}."""
) , )
recursive_check(snake_case__ , snake_case__ )
for model_class in self.all_model_classes:
lowercase : List[Any] = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase : Optional[Any] = self._prepare_for_class(snake_case__ , snake_case__ )
lowercase : Tuple = self._prepare_for_class(snake_case__ , snake_case__ )
check_equivalence(snake_case__ , snake_case__ , snake_case__ )
lowercase : int = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
lowercase : int = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
check_equivalence(snake_case__ , snake_case__ , snake_case__ )
lowercase : Any = self._prepare_for_class(snake_case__ , snake_case__ )
lowercase : Tuple = self._prepare_for_class(snake_case__ , snake_case__ )
check_equivalence(snake_case__ , snake_case__ , snake_case__ , {"""output_hidden_states""": True} )
lowercase : Any = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
lowercase : List[Any] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
check_equivalence(snake_case__ , snake_case__ , snake_case__ , {"""output_hidden_states""": True} )
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    # Overridden common test: MaskFormerSwin's hidden states come out flattened as
    # (batch_size, height * width, num_channels), so the shapes are checked by hand here.
    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
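# Backbone sketch mirroring what the tests above exercise (illustrative):
#
#   config = MaskFormerSwinConfig(out_features=["stage1", "stage2", "stage3"])
#   backbone = MaskFormerSwinBackbone(config)
#   feature_maps = backbone(torch.randn(1, 3, 224, 224)).feature_maps
#   [fm.shape for fm in feature_maps]  # one (batch, channels, h, w) map per requested stage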
| 264 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # SpeechT5 isn't deterministic unless the seed is fixed.
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
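# Outside the test harness the tool can be driven directly (sketch; playing or saving the
# audio is left to the caller):
#
#   tool = load_tool("text-to-speech")
#   tool.setup()
#   waveform = tool("hey").to_raw()  # 1-D float tensor of audio samples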
| 611 | 0 |
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    """Pipeline that returns the hidden states of the base transformer as features."""

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
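# Usage sketch via the high-level factory (illustrative; the checkpoint name is just an
# example):
#
#   from transformers import pipeline
#   extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#   features = extractor("Transformers is great!")  # nested list: [batch][tokens][hidden_size]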
| 721 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
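# Quick numeric sanity check (illustrative): with the cosine transform the betas start
# near zero and the final value is clipped by `max_beta`.
#
#   betas = betas_for_alpha_bar(1000)
#   float(betas[0])   # ~4e-05
#   float(betas[-1])  # 0.999 (alpha_bar reaches zero at t=1, so max_beta applies)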
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor]) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t

    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor:
        """Constructs the noise schedule of Karras et al. (2022)."""
        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def state_in_first_order(self):
        return self.dt is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
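# Denoising-loop sketch (illustrative; `unet` stands in for any epsilon-predicting model
# with a (sample, timestep) call signature). Note the doubled timesteps produced by
# `set_timesteps`: each Heun step consumes two model evaluations, a first-order prediction
# followed by its correction.
#
#   scheduler = HeunDiscreteScheduler(beta_start=0.00085, beta_end=0.012)
#   scheduler.set_timesteps(25)
#   sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = unet(model_input, t)
#       sample = scheduler.step(noise_pred, t, sample).prev_sample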
| 214 | 0 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}."
        )
    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '''
'''your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on one CPU.''' )
        function(*args)
else:
if num_processes is None:
raise ValueError(
'''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '''
'''inside your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if torch.cuda.is_initialized():
raise ValueError(
'''To launch a multi-GPU training from your notebook, you need to avoid running any instruction '''
'''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '''
'''function.''' )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.0.1", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '''
'''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '''
'''Please review your imports and test them when running the `notebook_launcher()` to identify '''
'''which one is problematic.''' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
            os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
print('''Launching training on MPS.''' )
elif torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on CPU.''' )
        function(*args)
def debug_launcher(function, args=(), num_processes=2):
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes, master_addr="127.0.0.1", master_port="29500", accelerate_mixed_precision="no", accelerate_debug_rdv_file=tmp_file.name, accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
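# Usage sketch (illustrative): the launched callable must build its own Accelerator and be
# picklable so the notebook workers can fork it.
#
#   def training_function(learning_rate):
#       ...  # create Accelerator(), model, dataloaders, training loop
#
#   notebook_launcher(training_function, args=(3e-4,), num_processes=2)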
| 114 |
import os
def solution():
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
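# Worked example from the problem statement: "COLIN" scores 3 + 15 + 12 + 9 + 14 = 53 and
# sits at position 938 in the sorted list, so it contributes 938 * 53 = 49714 to the total.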
| 114 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
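# With the lazy module installed, importing this package is cheap: the torch-backed classes
# are only materialized on first attribute access (sketch):
#
#   import transformers.models.clipseg as clipseg
#   clipseg.CLIPSegProcessor  # resolved on demand via _LazyModule.__getattr__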
| 417 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    r"""Constructs a MobileViT image processor."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
def _lowerCamelCase ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = PIL.Image.BILINEAR , UpperCAmelCase__ = None , **UpperCAmelCase__ , ) -> np.ndarray:
_A : Optional[Any] = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" )
_A : Any = get_resize_output_image_size(UpperCAmelCase__ , size=size['''shortest_edge'''] , default_to_square=UpperCAmelCase__ )
return resize(UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def _lowerCamelCase ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = None , **UpperCAmelCase__ , ) -> np.ndarray:
_A : List[Any] = get_size_dict(UpperCAmelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(UpperCAmelCase__ , size=(size['''height'''], size['''width''']) , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def _lowerCamelCase ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = None , **UpperCAmelCase__ , ) -> Optional[int]:
return rescale(UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def _lowerCamelCase ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ) -> np.ndarray:
return flip_channel_order(UpperCAmelCase__ , data_format=UpperCAmelCase__ )
def _lowerCamelCase ( self , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = ChannelDimension.FIRST , **UpperCAmelCase__ , ) -> PIL.Image.Image:
_A : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
_A : Any = resample if resample is not None else self.resample
_A : Tuple = do_rescale if do_rescale is not None else self.do_rescale
_A : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
_A : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
_A : Union[str, Any] = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
_A : Optional[int] = size if size is not None else self.size
_A : Tuple = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ )
_A : Any = crop_size if crop_size is not None else self.crop_size
_A : str = get_size_dict(UpperCAmelCase__ , param_name='''crop_size''' )
_A : Union[str, Any] = make_list_of_images(UpperCAmelCase__ )
if not valid_images(UpperCAmelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
# All transformations expect numpy arrays.
_A : Tuple = [to_numpy_array(UpperCAmelCase__ ) for image in images]
if do_resize:
_A : List[str] = [self.resize(image=UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ ) for image in images]
if do_center_crop:
_A : Union[str, Any] = [self.center_crop(image=UpperCAmelCase__ , size=UpperCAmelCase__ ) for image in images]
if do_rescale:
_A : str = [self.rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
_A : Dict = [self.flip_channel_order(image=UpperCAmelCase__ ) for image in images]
_A : int = [to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images]
_A : List[Any] = {'''pixel_values''': images}
return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
def _lowerCamelCase ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ) -> Optional[int]:
_A : Optional[Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(UpperCAmelCase__ ):
_A : int = target_sizes.numpy()
_A : Optional[int] = []
for idx in range(len(UpperCAmelCase__ ) ):
_A : Optional[int] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=UpperCAmelCase__ )
_A : Tuple = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCAmelCase__ )
else:
_A : Any = logits.argmax(dim=1 )
_A : int = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
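# Self-contained sketch of the semantic-segmentation post-processing above:
# per-image logits are resized to the requested target size and reduced to a
# per-pixel label map with argmax (tensor shapes here are made up).
import torch

logits = torch.randn(2, 21, 32, 32)  # (batch, num_labels, height, width)
target_sizes = [(64, 48), (50, 50)]
semantic_maps = []
for idx, size in enumerate(target_sizes):
    resized = torch.nn.functional.interpolate(
        logits[idx].unsqueeze(dim=0), size=size, mode="bilinear", align_corners=False
    )
    semantic_maps.append(resized[0].argmax(dim=0))  # (height, width) label map
assert semantic_maps[0].shape == (64, 48)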
| 417 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 640 |
'''simple docstring'''
def bfs(graph, source, sink, parent) -> bool:
    """Breadth-first search for an augmenting path; records it in `parent`."""
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]


def ford_fulkerson(graph, source, sink) -> int:
    """Edmonds-Karp variant: augment along BFS (shortest) paths until none remain."""
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
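# After the call above, `graph` holds residual capacities, so the source side
# of a minimum cut is whatever remains reachable from `source`; by max-flow/
# min-cut duality the cut's original capacity equals the printed flow
# (23 for this classic network).
def source_side(residual, s):
    seen, stack = {s}, [s]
    while stack:
        u = stack.pop()
        for v, cap in enumerate(residual[u]):
            if cap > 0 and v not in seen:
                seen.add(v)
                stack.append(v)
    return seen


print(sorted(source_side(graph, source)))  # vertices on the source side of a min cut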
| 640 | 1 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class __snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
            ], # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
            ], # cumulative prob of 5 highest values <= 0.6
] ,dtype=tf.floataa ,)
lowerCAmelCase__ = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] ,dtype=tf.intaa ,) # expected non filtered idx as noted above
lowerCAmelCase__ = tf.convert_to_tensor(
[8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] ,dtype=tf.floataa ,) # expected non filtered values as noted above
lowerCAmelCase__ = tf_top_k_top_p_filtering(a_ ,top_k=10 ,top_p=0.6 ,min_tokens_to_keep=4 )
lowerCAmelCase__ = output[output != -float('inf' )]
lowerCAmelCase__ = tf.cast(
tf.where(tf.not_equal(a_ ,tf.constant(-float('inf' ) ,dtype=tf.floataa ) ) ) ,dtype=tf.intaa ,)
tf.debugging.assert_near(a_ ,a_ ,rtol=1e-1_2 )
tf.debugging.assert_equal(a_ ,a_ )
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
SCREAMING_SNAKE_CASE__ = {
'AutoModelForCausalLM': TFAutoModelForCausalLM,
'AutoModelForSpeechSeq2Seq': TFAutoModelForSpeechSeqaSeq,
'AutoModelForSeq2SeqLM': TFAutoModelForSeqaSeqLM,
'AutoModelForVision2Seq': TFAutoModelForVisionaSeq,
'LogitsProcessorList': TFLogitsProcessorList,
'MinLengthLogitsProcessor': TFMinLengthLogitsProcessor,
'create_tensor_fn': tf.convert_to_tensor,
'floats_tensor': floats_tensor,
'return_tensors': 'tf',
}
@slow
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
# TF-only test: tf.saved_model export
lowerCAmelCase__ = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowerCAmelCase__ = 2
lowerCAmelCase__ = 2
        class DummyModel(tf.Module):
def __init__( self ,a_ ):
"""simple docstring"""
super(a_ ,self ).__init__()
lowerCAmelCase__ = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) ,tf.intaa ,name='input_ids' ),
tf.TensorSpec((None, input_length) ,tf.intaa ,name='attention_mask' ),
) ,jit_compile=a_ ,)
def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_ ):
"""simple docstring"""
lowerCAmelCase__ = self.model.generate(
input_ids=a_ ,attention_mask=a_ ,max_new_tokens=a_ ,return_dict_in_generate=a_ ,)
return {"sequences": outputs["sequences"]}
lowerCAmelCase__ = [[2, 0], [102, 103]]
lowerCAmelCase__ = [[1, 0], [1, 1]]
lowerCAmelCase__ = DummyModel(model=a_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(a_ ,a_ ,signatures={'serving_default': dummy_model.serving} )
lowerCAmelCase__ = tf.saved_model.load(a_ ).signatures['serving_default']
for batch_size in range(1 ,len(a_ ) + 1 ):
lowerCAmelCase__ = {
'input_ids': tf.constant(dummy_input_ids[:batch_size] ),
'attention_mask': tf.constant(dummy_attention_masks[:batch_size] ),
}
lowerCAmelCase__ = serving_func(**a_ )['sequences']
lowerCAmelCase__ = test_model.generate(**a_ ,max_new_tokens=a_ )
tf.debugging.assert_equal(a_ ,a_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
# TF-only test: tf.saved_model export
lowerCAmelCase__ = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowerCAmelCase__ = 1
lowerCAmelCase__ = 2
        class DummyModel(tf.Module):
def __init__( self ,a_ ):
"""simple docstring"""
super(a_ ,self ).__init__()
lowerCAmelCase__ = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) ,tf.intaa ,name='input_ids' ),
tf.TensorSpec((batch_size, None) ,tf.intaa ,name='attention_mask' ),
) ,jit_compile=a_ ,)
def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_ ):
"""simple docstring"""
lowerCAmelCase__ = self.model.generate(
input_ids=a_ ,attention_mask=a_ ,max_new_tokens=a_ ,return_dict_in_generate=a_ ,)
return {"sequences": outputs["sequences"]}
lowerCAmelCase__ = [[2], [102, 103]]
lowerCAmelCase__ = [[1], [1, 1]]
lowerCAmelCase__ = DummyModel(model=a_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(a_ ,a_ ,signatures={'serving_default': dummy_model.serving} )
lowerCAmelCase__ = tf.saved_model.load(a_ ).signatures['serving_default']
for input_row in range(len(a_ ) ):
lowerCAmelCase__ = {
'input_ids': tf.constant([dummy_input_ids[input_row]] ),
'attention_mask': tf.constant([dummy_attention_masks[input_row]] ),
}
lowerCAmelCase__ = serving_func(**a_ )['sequences']
lowerCAmelCase__ = test_model.generate(**a_ ,max_new_tokens=a_ )
tf.debugging.assert_equal(a_ ,a_ )
@slow
@require_tensorflow_text
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id='google/flan-t5-small' ,filename='spiece.model' ,local_dir=a_ )
            class CompleteSentenceTransformer(tf.keras.layers.Layer):
def __init__( self ):
"""simple docstring"""
super().__init__()
lowerCAmelCase__ = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(a_ ,'spiece.model' ) ,'rb' ).read() )
lowerCAmelCase__ = TFAutoModelForSeqaSeqLM.from_pretrained('hf-internal-testing/tiny-random-t5' )
def SCREAMING_SNAKE_CASE_ ( self ,a_ ,*a_ ,**a_ ):
"""simple docstring"""
lowerCAmelCase__ = self.tokenizer.tokenize(a_ )
lowerCAmelCase__ , lowerCAmelCase__ = text.pad_model_inputs(
a_ ,max_seq_length=64 ,pad_value=self.model.config.pad_token_id )
lowerCAmelCase__ = self.model.generate(input_ids=a_ ,attention_mask=a_ )
return self.tokenizer.detokenize(a_ )
lowerCAmelCase__ = CompleteSentenceTransformer()
lowerCAmelCase__ = tf.keras.layers.Input(shape=(1,) ,dtype=tf.string ,name='inputs' )
lowerCAmelCase__ = complete_model(a_ )
lowerCAmelCase__ = tf.keras.Model(a_ ,a_ )
keras_model.save(a_ )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
# Has PT equivalent: this test relies on random sampling
lowerCAmelCase__ = {
'do_sample': True,
'num_beams': 1,
'top_p': 0.7,
'top_k': 10,
'temperature': 0.7,
}
lowerCAmelCase__ = 14
lowerCAmelCase__ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowerCAmelCase__ = 'Hello, my dog is cute and'
lowerCAmelCase__ = tokenizer(a_ ,return_tensors='tf' )
lowerCAmelCase__ = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowerCAmelCase__ = 638
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(':/CPU:0' ):
tf.random.set_seed(0 )
lowerCAmelCase__ = model.generate(**a_ ,eos_token_id=a_ ,**a_ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
lowerCAmelCase__ = [638, 198]
with tf.device(':/CPU:0' ):
tf.random.set_seed(0 )
lowerCAmelCase__ = model.generate(**a_ ,eos_token_id=a_ ,**a_ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
# Has PT equivalent: ample use of framework-specific code
lowerCAmelCase__ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bart' )
lowerCAmelCase__ = 'Hugging Face is a technology company based in New York and Paris.'
lowerCAmelCase__ = bart_tokenizer(a_ ,return_tensors='tf' ).input_ids
lowerCAmelCase__ = TFBartForConditionalGeneration.from_pretrained('hf-internal-testing/tiny-random-bart' )
lowerCAmelCase__ = bart_model.generate(a_ ).numpy()
        class FakeBart(TFBartForConditionalGeneration):
def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_=None ,**a_ ):
"""simple docstring"""
return super().call(a_ ,**a_ )
lowerCAmelCase__ = FakeBart.from_pretrained('hf-internal-testing/tiny-random-bart' )
lowerCAmelCase__ = bart_model.generate(a_ ,foo='bar' ).numpy()
self.assertTrue(np.array_equal(a_ ,a_ ) )
        class FakeEncoder(bart_model.model.encoder.__class__):
def SCREAMING_SNAKE_CASE_ ( self ,a_ ,**a_ ):
"""simple docstring"""
return super().call(a_ ,**a_ )
lowerCAmelCase__ = FakeEncoder(bart_model.config ,bart_model.model.shared )
lowerCAmelCase__ = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
lowerCAmelCase__ = bart_model.generate(a_ ).numpy()
with self.assertRaises(a_ ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(a_ ,foo='bar' )
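# Framework-agnostic sketch of what the top-k/top-p filtering exercised above
# does (ignoring min_tokens_to_keep): keep tokens that are both among the k
# best and inside the smallest nucleus whose probability mass reaches p, and
# mask everything else to -inf before sampling.
import numpy as np


def top_k_top_p(logits: np.ndarray, k: int, p: float) -> np.ndarray:
    order = np.argsort(logits)[::-1]  # token ids, best first
    z = np.exp(logits[order] - logits.max())
    cum = np.cumsum(z / z.sum())
    nucleus = order[: np.searchsorted(cum, p) + 1]  # smallest mass >= p
    keep = list(set(order[:k]) & set(nucleus.tolist()))
    out = np.full_like(logits, -np.inf)
    out[keep] = logits[keep]
    return out


print(top_k_top_p(np.array([2.0, 1.0, 0.1, -1.0]), k=3, p=0.9))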
| 604 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Any = logging.get_logger(__name__)
_lowerCAmelCase : Tuple = {}
class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]
def __init__( self ,a_=3_2000 ,a_=4096 ,a_=1_1008 ,a_=32 ,a_=32 ,a_=None ,a_="silu" ,a_=2048 ,a_=0.02 ,a_=1e-6 ,a_=True ,a_=0 ,a_=1 ,a_=2 ,a_=1 ,a_=False ,a_=None ,**a_ ,):
"""simple docstring"""
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = num_key_value_heads
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = rms_norm_eps
lowerCAmelCase__ = pretraining_tp
lowerCAmelCase__ = use_cache
lowerCAmelCase__ = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=a_ ,bos_token_id=a_ ,eos_token_id=a_ ,tie_word_embeddings=a_ ,**a_ ,)
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling ,a_ ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
f'got {self.rope_scaling}' )
lowerCAmelCase__ = self.rope_scaling.get('type' ,a_ )
lowerCAmelCase__ = self.rope_scaling.get('factor' ,a_ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
                f'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
if rope_scaling_factor is None or not isinstance(a_ ,a_ ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' )
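# Quick sanity check of the validation above; the __init__ parameter names in
# this snippet are obfuscated, so the keyword call assumes the upstream
# `rope_scaling` argument.
config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # accepted
try:
    LlamaConfig(rope_scaling={"type": "linear", "factor": 0.5})  # factor must be > 1
except ValueError as err:
    print(err)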
| 604 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase :Optional[Any] = {
"uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class MraConfig(PretrainedConfig):
    model_type = "mra"
def __init__(self , lowercase=50265 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=1 , lowercase=0.02 , lowercase=1E-5 , lowercase="absolute" , lowercase=4 , lowercase="full" , lowercase=0 , lowercase=0 , lowercase=1 , lowercase=0 , lowercase=2 , **lowercase , ):
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
A_ : Dict = vocab_size
A_ : Optional[Any] = max_position_embeddings
A_ : Any = hidden_size
A_ : str = num_hidden_layers
A_ : Dict = num_attention_heads
A_ : int = intermediate_size
A_ : int = hidden_act
A_ : Optional[int] = hidden_dropout_prob
A_ : Union[str, Any] = attention_probs_dropout_prob
A_ : Any = initializer_range
A_ : List[Any] = type_vocab_size
A_ : str = layer_norm_eps
A_ : Tuple = position_embedding_type
A_ : List[Any] = block_per_row
A_ : Optional[int] = approx_mode
A_ : Optional[Any] = initial_prior_first_n_blocks
        A_ : Union[str, Any] = initial_prior_diagonal_n_blocks
| 667 |
import re
from filelock import FileLock
try:
import nltk
a_ : Optional[Any] = True
except (ImportError, ModuleNotFoundError):
a_ : Union[str, Any] = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Re-inserts sentence newlines so rougeLsum matches published rougeL scores."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 439 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_lowerCAmelCase = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 245 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
"REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RemBertForCausalLM",
"RemBertForMaskedLM",
"RemBertForMultipleChoice",
"RemBertForQuestionAnswering",
"RemBertForSequenceClassification",
"RemBertForTokenClassification",
"RemBertLayer",
"RemBertModel",
"RemBertPreTrainedModel",
"load_tf_weights_in_rembert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
"TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRemBertForCausalLM",
"TFRemBertForMaskedLM",
"TFRemBertForMultipleChoice",
"TFRemBertForQuestionAnswering",
"TFRemBertForSequenceClassification",
"TFRemBertForTokenClassification",
"TFRemBertLayer",
"TFRemBertModel",
"TFRemBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 245 | 1 |
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
UpperCamelCase__ : List[Any] = datasets.logging.get_logger(__name__)
_CITATION = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
_DESCRIPTION = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year's competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
_KWARGS_DESCRIPTION = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCamelCase ( datasets.Metric ):
'''simple docstring'''
def UpperCamelCase__ ( self : Optional[int] ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://unbabel.github.io/COMET/html/index.html""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""sources""": datasets.Value("""string""" , id="""sequence""" ),
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/Unbabel/COMET"""] , reference_urls=[
"""https://github.com/Unbabel/COMET""",
"""https://www.aclweb.org/anthology/2020.emnlp-main.213/""",
"""http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6""",
] , )
def UpperCamelCase__ ( self : Dict , lowerCAmelCase__ : Any ):
"""simple docstring"""
if self.config_name == "default":
__SCREAMING_SNAKE_CASE : Any = comet.load_from_checkpoint(comet.download_model("""wmt20-comet-da""" ) )
else:
__SCREAMING_SNAKE_CASE : Dict = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
def UpperCamelCase__ ( self : str , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : Tuple=False ):
"""simple docstring"""
if gpus is None:
__SCREAMING_SNAKE_CASE : str = 1 if torch.cuda.is_available() else 0
__SCREAMING_SNAKE_CASE : Optional[int] = {"""src""": sources, """mt""": predictions, """ref""": references}
__SCREAMING_SNAKE_CASE : int = [dict(zip(lowerCAmelCase__ , lowerCAmelCase__ ) ) for t in zip(*data.values() )]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.scorer.predict(lowerCAmelCase__ , gpus=lowerCAmelCase__ , progress_bar=lowerCAmelCase__ )
return {"mean_score": mean_score, "scores": scores} | 578 |
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Finds a root of `func` (an expression in ``x``) near the initial guess ``a``."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
# Find root of polynomial
print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
    # Find value of e (root of log(x) - 1 = 0)
print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
# Exponential Roots
print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}") | 578 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"
def __init__( self :List[Any] , __A :List[Any]=3_0522 , __A :int=768 , __A :Dict=12 , __A :int=12 , __A :int=3072 , __A :Any="gelu" , __A :List[str]=0.1 , __A :Any=0.1 , __A :Union[str, Any]=512 , __A :List[str]=2 , __A :Optional[int]=0.0_2 , __A :Union[str, Any]=1E-12 , __A :Tuple=1 , __A :Tuple=0 , __A :int=2 , __A :Union[str, Any]=768 , __A :int=2 , __A :str=9 , __A :Tuple=1 , __A :Optional[int]=None , **__A :Optional[Any] , ) -> int:
"""simple docstring"""
super().__init__(
pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A , )
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = embedding_size
SCREAMING_SNAKE_CASE__ = head_ratio
SCREAMING_SNAKE_CASE__ = conv_kernel_size
SCREAMING_SNAKE_CASE__ = num_groups
SCREAMING_SNAKE_CASE__ = classifier_dropout
class ConvBertOnnxConfig(OnnxConfig):
@property
def _snake_case ( self :Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
SCREAMING_SNAKE_CASE__ = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
            ] )
| 59 |
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    """Turns ["--name", "value", ...] pairs into a {name: value} dict."""
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)
    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)
    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
    main()
| 59 | 1 |
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """Expected number of distinct colours among `num_picked` randomly drawn balls."""
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"
if __name__ == "__main__":
print(solution(20))
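# Monte Carlo cross-check of the closed form above (a sketch; agreement to
# roughly two decimal places is expected for this sample size):
import random


def simulate(num_picked: int = 20, trials: int = 20_000) -> float:
    balls = [colour for colour in range(NUM_COLOURS) for _ in range(BALLS_PER_COLOUR)]
    distinct = 0
    for _ in range(trials):
        distinct += len(set(random.sample(balls, num_picked)))
    return distinct / trials


print(simulate())  # should be close to solution(20) == '6.818741802'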
| 457 |
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as nested Python lists."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTester(unittest.TestCase):
"""simple docstring"""
def __init__( self , A , A=7 , A=4_00 , A=20_00 , A=24 , A=24 , A=0.0 , A=1_60_00 , A=True , A=True , ) -> str:
'''simple docstring'''
lowerCamelCase = parent
lowerCamelCase = batch_size
lowerCamelCase = min_seq_length
lowerCamelCase = max_seq_length
lowerCamelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCamelCase = feature_size
lowerCamelCase = num_mel_bins
lowerCamelCase = padding_value
lowerCamelCase = sampling_rate
lowerCamelCase = return_attention_mask
lowerCamelCase = do_normalize
def __A ( self ) -> List[str]:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __A ( self , A=False , A=False ) -> Tuple:
'''simple docstring'''
def _flatten(A ):
return list(itertools.chain(*A ) )
if equal_length:
lowerCamelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowerCamelCase = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCamelCase = [np.asarray(A ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    """simple docstring"""

    feature_extraction_class = SpeechaTextFeatureExtractor if is_speech_available() else None
def __A ( self ) -> int:
'''simple docstring'''
lowerCamelCase = SpeechaTextFeatureExtractionTester(self )
def __A ( self , A ) -> List[Any]:
'''simple docstring'''
self.assertTrue(np.all(np.mean(A , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(A , axis=0 ) - 1 ) < 1e-3 ) )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCamelCase = [np.asarray(A ) for speech_input in speech_inputs]
# Test feature size
lowerCamelCase = feature_extractor(A , padding=A , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
lowerCamelCase = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
lowerCamelCase = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(A , A , atol=1e-3 ) )
# Test batched
lowerCamelCase = feature_extractor(A , return_tensors="""np""" ).input_features
lowerCamelCase = feature_extractor(A , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(A , A ):
self.assertTrue(np.allclose(A , A , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
lowerCamelCase = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
lowerCamelCase = np.asarray(A )
lowerCamelCase = feature_extractor(A , return_tensors="""np""" ).input_features
lowerCamelCase = feature_extractor(A , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(A , A ):
self.assertTrue(np.allclose(A , A , atol=1e-3 ) )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCamelCase = ["""longest""", """max_length""", """do_not_pad"""]
lowerCamelCase = [None, 16, None]
for max_length, padding in zip(A , A ):
lowerCamelCase = feature_extractor(
A , padding=A , max_length=A , return_attention_mask=A )
lowerCamelCase = inputs.input_features
lowerCamelCase = inputs.attention_mask
lowerCamelCase = [np.sum(A ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def __A ( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCamelCase = ["""longest""", """max_length""", """do_not_pad"""]
lowerCamelCase = [None, 16, None]
for max_length, padding in zip(A , A ):
lowerCamelCase = feature_extractor(
A , max_length=A , padding=A , return_tensors="""np""" , return_attention_mask=A )
lowerCamelCase = inputs.input_features
lowerCamelCase = inputs.attention_mask
lowerCamelCase = [np.sum(A ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def __A ( self ) -> int:
'''simple docstring'''
lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCamelCase = feature_extractor(
A , padding="""max_length""" , max_length=4 , truncation=A , return_tensors="""np""" , return_attention_mask=A , )
lowerCamelCase = inputs.input_features
lowerCamelCase = inputs.attention_mask
lowerCamelCase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def __A ( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCamelCase = feature_extractor(
A , padding="""longest""" , max_length=4 , truncation=A , return_tensors="""np""" , return_attention_mask=A , )
lowerCamelCase = inputs.input_features
lowerCamelCase = inputs.attention_mask
lowerCamelCase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 24) )
lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCamelCase = feature_extractor(
A , padding="""longest""" , max_length=16 , truncation=A , return_tensors="""np""" , return_attention_mask=A , )
lowerCamelCase = inputs.input_features
lowerCamelCase = inputs.attention_mask
lowerCamelCase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 6, 24) )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
import torch
lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase = np.random.rand(1_00 , 32 ).astype(np.floataa )
lowerCamelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCamelCase = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
lowerCamelCase = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def __A ( self , A ) -> Any:
'''simple docstring'''
from datasets import load_dataset
lowerCamelCase = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
lowerCamelCase = ds.sort("""id""" ).select(range(A ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def __A ( self ) -> Any:
'''simple docstring'''
lowerCamelCase = np.array([
-1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
-1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
-1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
] )
# fmt: on
lowerCamelCase = self._load_datasamples(1 )
lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase = feature_extractor(A , return_tensors="""pt""" ).input_features
self.assertEquals(input_features.shape , (1, 5_84, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30] , A , atol=1e-4 ) )
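# The zero-mean/unit-variance property asserted throughout these tests, shown
# in isolation on synthetic features (utterance-level normalization):
import numpy as np

feats = np.random.randn(100, 24) * 3.0 + 5.0  # arbitrary fbank-like features
normed = (feats - feats.mean(axis=0)) / (feats.std(axis=0) + 1e-10)
assert np.all(np.abs(normed.mean(axis=0)) < 1e-3)
assert np.all(np.abs(normed.var(axis=0) - 1) < 1e-3)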
| 457 | 1 |
'''simple docstring'''
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list = None) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
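# One strand-extraction pass in isolation: the first loop above pulls an
# increasing run out of the input before merging it into the solution.
arr = [4, 3, 5, 1, 2]
sublist = [arr.pop(0)]
for i, item in enumerate(arr):
    if item > sublist[-1]:
        sublist.append(item)
        arr.pop(i)
print(sublist, arr)  # [4, 5] extracted, leaving [3, 1, 2]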
| 719 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
A = StableDiffusionSAGPipeline
A = TEXT_TO_IMAGE_PARAMS
A = TEXT_TO_IMAGE_BATCH_PARAMS
A = TEXT_TO_IMAGE_IMAGE_PARAMS
A = TEXT_TO_IMAGE_IMAGE_PARAMS
A = False
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
lowerCamelCase_ : int = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=UpperCamelCase_ , set_alpha_to_one=UpperCamelCase_ , )
torch.manual_seed(0 )
lowerCamelCase_ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCamelCase_ : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
lowerCamelCase_ : Dict = CLIPTextModel(UpperCamelCase_ )
lowerCamelCase_ : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowerCamelCase_ : Any = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __UpperCamelCase ( self : List[str] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Union[str, Any]=0 ) -> Tuple:
"""simple docstring"""
if str(UpperCamelCase_ ).startswith('''mps''' ):
lowerCamelCase_ : Any = torch.manual_seed(UpperCamelCase_ )
else:
lowerCamelCase_ : str = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
lowerCamelCase_ : Tuple = {
'''prompt''': '''.''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 1.0,
'''sag_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def __UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : List[Any] ) -> str:
"""simple docstring"""
lowerCamelCase_ : str = StableDiffusionSAGPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
lowerCamelCase_ : int = sag_pipe.to(UpperCamelCase_ )
sag_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowerCamelCase_ : str = '''.'''
lowerCamelCase_ : int = torch.manual_seed(0 )
lowerCamelCase_ : int = sag_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
lowerCamelCase_ : Dict = output.images
lowerCamelCase_ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase_ : Tuple = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def __UpperCamelCase ( self : Dict ) -> Dict:
"""simple docstring"""
lowerCamelCase_ : List[Any] = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
lowerCamelCase_ : Optional[Any] = sag_pipe.to(UpperCamelCase_ )
sag_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowerCamelCase_ : Tuple = '''.'''
lowerCamelCase_ : Tuple = torch.manual_seed(0 )
lowerCamelCase_ : List[Any] = sag_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
lowerCamelCase_ : Optional[int] = output.images
lowerCamelCase_ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase_ : Dict = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ : Any = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
lowerCamelCase_ : Optional[Any] = sag_pipe.to(UpperCamelCase_ )
sag_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowerCamelCase_ : List[str] = '''.'''
lowerCamelCase_ : Optional[Any] = torch.manual_seed(0 )
lowerCamelCase_ : int = sag_pipe(
[prompt] , width=768 , height=512 , generator=UpperCamelCase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' , )
lowerCamelCase_ : Union[str, Any] = output.images
assert image.shape == (1, 512, 768, 3)
| 418 | 0 |
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
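# Hedged example invocation of the converter defined above (all paths are
# placeholders, not real files), kept commented out on purpose:
# convert_tf_checkpoint_to_pytorch(
#     tf_checkpoint_path="model.ckpt",
#     config_file="config.json",
#     pytorch_dump_path="pytorch_model.bin",
#     base_model=False,
# )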
| 635 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Dict = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE : Dict = [
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": (
"https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
),
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": (
"https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
),
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"funnel-transformer/{name}": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names}


class FunnelTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        bos_token="<s>",
        eos_token="</s>",
        clean_text=True,
        tokenize_chinese_chars=True,
        strip_accents=None,
        wordpieces_prefix="##",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            bos_token=bos_token,
            eos_token=eos_token,
            clean_text=clean_text,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            wordpieces_prefix=wordpieces_prefix,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_b=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_a + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
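# --- added example -------------------------------------------------------
# Quick illustration of the Funnel-specific detail above: [CLS] gets its own
# token type id (2) instead of 0. Assumes network access to the hub.
from transformers import FunnelTokenizerFast

tok = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
enc = tok("hello there", "general kenobi")
print(enc["token_type_ids"])  # starts with 2 for [CLS], then 0s for the first
                              # segment and 1s for the second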
| 635 | 1 |
def circle_sort(collection):
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection, low, high) -> bool:
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True

    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(circle_sort(unsorted))
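# --- added example -------------------------------------------------------
# Sanity checks for circle_sort as restored above; these mirror the doctest
# cases in the upstream TheAlgorithms version this snippet appears to follow.
assert circle_sort([]) == []
assert circle_sort([5, 1, 4, 2, 3]) == [1, 2, 3, 4, 5]
assert circle_sort([-2, 7, 7, 0]) == [-2, 0, 7, 7]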
| 714 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
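# --- added example -------------------------------------------------------
# Stand-alone illustration of the fused-qkv split performed in
# rename_state_dict above; the 64-dim shapes are made up for the demo.
import torch

mixed_qkv = torch.randn(3 * 64, 64)  # fused [q; k; v] projection weight
qkv_dim = mixed_qkv.size(0) // 3
query = mixed_qkv[:qkv_dim]
key = mixed_qkv[qkv_dim : qkv_dim * 2]
value = mixed_qkv[qkv_dim * 2 :]
assert query.shape == key.shape == value.shape == (64, 64)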
| 203 | 0 |
"""simple docstring"""
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class Conversation:
    def __init__(self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
    def _sanitize_parameters(self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32):
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation):
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
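# --- added example -------------------------------------------------------
# Hedged sketch of driving the pipeline above; the DialoGPT checkpoint is an
# illustrative choice, any conversational model from the hub would do.
from transformers import pipeline

chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
conversation = Conversation("Going to the movies tonight - any suggestions?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])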
| 357 |
"""simple docstring"""
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    """Evaluate one generated program by running it in a separate process
    with a hard timeout."""
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)
@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield
@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname


class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"
@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None):
    """Disable destructive functions so the generated code cannot interfere
    with the test (a best-effort safeguard, not a real sandbox)."""
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()
    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
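# --- added example -------------------------------------------------------
# How the context managers above compose when vetting one generated program.
# reliability_guard is omitted here because, as unsafe_execute shows, the
# patched os/shutil functions must be restored before the tempdir cleanup.
result = []
with create_tempdir():
    try:
        with swallow_io():
            with time_limit(3.0):
                exec("print(1 + 1)", {})
        result.append("passed")
    except TimeoutException:
        result.append("timed out")
print(result)  # ['passed']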
| 357 | 1 |
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
A = """src/transformers"""
A = """docs/source/en/tasks"""
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Return the text delimited by `start_prompt` and `end_prompt` in `filename`."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
"""asr.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"""audio_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"""language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"""image_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"""masked_language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"""multiple_choice.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"""object_detection.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"""question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"""semantic_segmentation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"""sequence_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"""summarization.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""token_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"""translation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""video_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"""document_question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"""monocular_depth_estimation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
"""summarization.md""": ("""nllb""",),
"""translation.md""": ("""nllb""",),
}
def get_model_list_for_task(task_guide):
    """Return the list of models supporting `task_guide`, formatted as doc links."""
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    """Check (and optionally rewrite) the auto-generated model list in one task guide."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )

    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
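# --- added example -------------------------------------------------------
# Minimal demo of the marker-delimited replacement idea used above, on an
# in-memory list of lines rather than a real task guide file.
lines = ["intro\n", "<!--start-->\n", "old list\n", "<!--end-->\n", "outro\n"]
start = lines.index("<!--start-->\n") + 1
end = lines.index("<!--end-->\n")
lines[start:end] = ["new list\n"]
assert "".join(lines) == "intro\n<!--start-->\nnew list\n<!--end-->\noutro\n"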
| 717 |
"""simple docstring"""
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config
def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")

    if name == "encoder.norm.weight":
        name = "encoder.layernorm.weight"
    if name == "encoder.norm.bias":
        name = "encoder.layernorm.bias"

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "<s_cord-v2>"  # opening "<" restored; it was missing in the snippet
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensors, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""naver-clova-ix/donut-base-finetuned-docvqa""",
required=False,
type=str,
help="""Name of the original model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
required=False,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub.""",
)
    args = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
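# --- added example -------------------------------------------------------
# Hedged sketch of running a converted Donut checkpoint; the checkpoint id
# and prompt follow the DocVQA branch above, everything else is standard
# VisionEncoderDecoder generation.
from datasets import load_dataset
from transformers import DonutProcessor, VisionEncoderDecoderModel

processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa")
model = VisionEncoderDecoderModel.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa")

image = load_dataset("hf-internal-testing/example-documents")["test"][0]["image"].convert("RGB")
pixel_values = processor(image, return_tensors="pt").pixel_values
decoder_input_ids = processor.tokenizer(
    "<s_docvqa><s_question>When is the coffee break?</s_question><s_answer>",
    add_special_tokens=False,
    return_tensors="pt",
).input_ids
outputs = model.generate(pixel_values, decoder_input_ids=decoder_input_ids, max_length=128)
print(processor.batch_decode(outputs)[0])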
| 487 | 0 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)
@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    sample: jnp.ndarray
@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str, ...] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str, ...] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
            )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=block_out_channels[-1],
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            use_memory_efficient_attention=self.use_memory_efficient_attention,
            dtype=self.dtype,
        )

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    num_attention_heads=reversed_num_attention_heads[i],
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                up_block = FlaxUpBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    dtype=self.dtype,
                )

            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        down_block_additional_residuals=None,
        mid_block_additional_residual=None,
        return_dict: bool = True,
        train: bool = False,
    ):
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()

            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)

            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlock2D):
                sample = up_block(
                    sample,
                    temb=t_emb,
                    encoder_hidden_states=encoder_hidden_states,
                    res_hidden_states_tuple=res_samples,
                    deterministic=not train,
                )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNet2DConditionOutput(sample=sample)
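# --- added example -------------------------------------------------------
# A hedged sketch of instantiating a tiny version of the model above and
# initialising its parameters; the reduced sizes are arbitrary test values.
import jax

model = FlaxUNet2DConditionModel(
    sample_size=32,
    block_out_channels=(32, 64),
    layers_per_block=1,
    attention_head_dim=2,
    cross_attention_dim=32,
    down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
    up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
)
params = model.init_weights(jax.random.PRNGKey(0))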
| 430 |
'''simple docstring'''
def binary_recursive(decimal: int) -> str:
    """Take a non-negative integer and return its binary digits."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Validate the input string, then return the value with a '0b' prefix
    (and a leading '-' for negative numbers)."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
if __name__ == "__main__":
from doctest import testmod
testmod()
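# --- added example -------------------------------------------------------
# A few spot checks for the recursive conversion; `main` mirrors Python's
# built-in bin() up to whitespace handling.
assert main("8") == "0b1000" == bin(8)
assert main("-1") == "-0b1" == bin(-1)
assert main(" 0 ") == "0b0"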
| 430 | 1 |
from __future__ import annotations
_UpperCAmelCase = tuple[int, int, int]
_UpperCAmelCase = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
_UpperCAmelCase = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
# -------------------------- default selection --------------------------
# rotors --------------------------
_UpperCAmelCase = """EGZWVONAHDCLFQMSIPJBYUKXTR"""
_UpperCAmelCase = """FOBHMDKEXQNRAULPGSJVTYICZW"""
_UpperCAmelCase = """ZJXESIUQLHAVRMDOYGTNFWPBKC"""
# reflector --------------------------
reflector = {
"""A""": """N""",
"""N""": """A""",
"""B""": """O""",
"""O""": """B""",
"""C""": """P""",
"""P""": """C""",
"""D""": """Q""",
"""Q""": """D""",
"""E""": """R""",
"""R""": """E""",
"""F""": """S""",
"""S""": """F""",
"""G""": """T""",
"""T""": """G""",
"""H""": """U""",
"""U""": """H""",
"""I""": """V""",
"""V""": """I""",
"""J""": """W""",
"""W""": """J""",
"""K""": """X""",
"""X""": """K""",
"""L""": """Y""",
"""Y""": """L""",
"""M""": """Z""",
"""Z""": """M""",
}
# -------------------------- extra rotors --------------------------
_UpperCAmelCase = """RMDJXFUWGISLHVTCQNKYPBEZOA"""
_UpperCAmelCase = """SGLCPQWZHKXAREONTFBVIYJUDM"""
_UpperCAmelCase = """HVSICLTYKQUBXDWAJZOMFGPREN"""
_UpperCAmelCase = """RZWQHFMVDBKICJLNTUXAGYPSOE"""
_UpperCAmelCase = """LFKIJODBEGAMQPXVUHYSTCZRWN"""
_UpperCAmelCase = """KOAEGVDHXPQZMLFTYWJNBRCIUS"""
def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    """Check whether the rotor positions, rotor selection and plugboard are valid."""
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
    """Build a symmetric letter-swap dict from a plugboard setting string."""
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}

    pbstring = pbstring.replace(" ", "")

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl

    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb
def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    """Encrypt (or, symmetrically, decrypt) `text` with the given settings."""
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(rotor_position, rotor_selection, plugb.upper())

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # else:
        #    pass
        #    Error could be also raised
        #    raise ValueError(
        #        'Invalid symbol('+repr(symbol)+')')

        result.append(symbol)

    return "".join(result)
if __name__ == "__main__":
_UpperCAmelCase = """This is my Python script that emulates the Enigma machine from WWII."""
_UpperCAmelCase = (1, 1, 1)
_UpperCAmelCase = """pictures"""
_UpperCAmelCase = (rotora, rotora, rotora)
_UpperCAmelCase = enigma(message, rotor_pos, rotor_sel, pb)
print("""Encrypted message:""", en)
print("""Decrypted message:""", enigma(en, rotor_pos, rotor_sel, pb))
| 707 |
def odd_even_transposition(arr: list) -> list:
    """Sort a list using odd-even transposition (parallel bubble sort)."""
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]

    return arr
if __name__ == "__main__":
    arr = list(range(10, 0, -1))
print(F"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
| 70 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}
# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}


class CanineTokenizer(PreTrainedTokenizer):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string (i.e. split it into a list of characters)."""
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (i.e. a character) to an id (i.e. its codepoint)."""
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        """Converts a codepoint back to a character, using readable names for
        the special pseudo-characters."""
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_a + sep
        if token_ids_b is not None:
            result += token_ids_b + sep
        return result

    def get_special_tokens_mask(
        self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_b, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_a)) + [1]
        if token_ids_b is not None:
            result += ([0] * len(token_ids_b)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_a + sep) * [0]
        if token_ids_b is not None:
            result += len(token_ids_b + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # CANINE has no vocab file to save.
        return ()
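# --- added example -------------------------------------------------------
# Character-level round trip with the tokenizer above; no vocab file is
# needed, ids are just Unicode codepoints plus the special pseudo-characters.
tok = CanineTokenizer()
ids = tok("hi")["input_ids"]
assert ids == [0xE000, ord("h"), ord("i"), 0xE001]  # [CLS] h i [SEP]
assert tok.decode(ids, skip_special_tokens=True) == "hi"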
| 539 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)
        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, ) )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2')

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D'), up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D'), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type='projection', projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True, )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule='scaled_linear', beta_start=0.00085, beta_end=0.012, prediction_type='v_prediction', set_alpha_to_one=False, steps_offset=1, )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            'feature_extractor': feature_extractor,
            'image_encoder': image_encoder.eval(),
            # image noising components
            'image_normalizer': image_normalizer.eval(),
            'image_noising_scheduler': image_noising_scheduler,
            # regular denoising components
            'tokenizer': tokenizer,
            'text_encoder': text_encoder.eval(),
            'unet': unet.eval(),
            'scheduler': scheduler,
            'vae': vae.eval(),
        }
        return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            'prompt': 'An anime racoon running a marathon',
            'image': input_image,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'np',
        }
    @skip_mps
    def test_image_embeds_none(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({'image_embeds': None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    # Looser checks below because GPU nondeterminism requires a larger tolerance.
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ['cpu', 'mps']
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ['cpu', 'mps']
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )

        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy' )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            'fusing/stable-unclip-2-1-l-img2img', torch_dtype=torch.float16 )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe(input_image, 'anime turtle', generator=generator, output_type='np')

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )

        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy' )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            'fusing/stable-unclip-2-1-h-img2img', torch_dtype=torch.float16 )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe(input_image, 'anime turtle', generator=generator, output_type='np')

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            'fusing/stable-unclip-2-1-h-img2img', torch_dtype=torch.float16 )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            input_image, 'anime turtle', num_inference_steps=2, output_type='np', )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 539 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('DownBlock2D', 'AttnDownBlock2D'), up_block_types=('AttnUpBlock2D', 'UpBlock2D'), )
        return model
    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type='numpy').images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type='numpy', return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_full_inference(self):
        model_id = 'google/ncsnpp-celebahq-256'
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type='numpy').images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
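
# For reference, KarrasVeScheduler follows the noise schedule of Karras et al. (2022), spacing
# noise levels as sigma_i = (sigma_max**(1/rho) + i/(N-1) * (sigma_min**(1/rho) - sigma_max**(1/rho)))**rho;
# rho, sigma_min and sigma_max here are scheduler defaults, not values set explicitly in this test.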
| 406 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = 'huggingface/label-files'
    filename = 'coco-detection-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False) -> None:
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'blocks.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(f'blocks.{i}.attn.qkv.bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'vit.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'vit.encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'vit.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'vit.encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'vit.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f'vit.encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
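
# Note: the fused qkv matrix popped above has shape (3 * hidden_size, hidden_size);
# the three equal slices are the query, key and value projections, in that order.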
def rename_key(name: str) -> str:
    if "backbone" in name:
        name = name.replace('backbone', 'vit')
    if "cls_token" in name:
        name = name.replace('cls_token', 'embeddings.cls_token')
    if "det_token" in name:
        name = name.replace('det_token', 'embeddings.detection_tokens')
    if "mid_pos_embed" in name:
        name = name.replace('mid_pos_embed', 'encoder.mid_position_embeddings')
    if "pos_embed" in name:
        name = name.replace('pos_embed', 'embeddings.position_embeddings')
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if "blocks" in name:
        name = name.replace('blocks', 'encoder.layer')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "class_embed" in name:
        name = name.replace('class_embed', 'class_labels_classifier')
    if "bbox_embed" in name:
        name = name.replace('bbox_embed', 'bbox_predictor')
    if "vit.norm" in name:
        name = name.replace('vit.norm', 'vit.layernorm')
    return name
def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split('.')
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f'vit.encoder.layer.{layer_num}.attention.attention.query.weight'] = val[:dim, :]
                orig_state_dict[f'vit.encoder.layer.{layer_num}.attention.attention.key.weight'] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f'vit.encoder.layer.{layer_num}.attention.attention.value.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'vit.encoder.layer.{layer_num}.attention.attention.query.bias'] = val[:dim]
                orig_state_dict[f'vit.encoder.layer.{layer_num}.attention.attention.key.bias'] = val[dim : dim * 2]
                orig_state_dict[f'vit.encoder.layer.{layer_num}.attention.attention.value.bias'] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_img() -> Image.Image:
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False) -> None:
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != 'yolos_ti' else 512
    image_processor = YolosImageProcessor(format='coco_detection', size=size)
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
    else:
        raise ValueError(f'Unknown yolos_name: {yolos_name}')

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model {yolos_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            'yolos_ti': 'yolos-tiny',
            'yolos_s_200_pre': 'yolos-small',
            'yolos_s_300_pre': 'yolos-small-300',
            'yolos_s_dWr': 'yolos-small-dwr',
            'yolos_base': 'yolos-base',
        }

        print('Pushing to the hub...')
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization='hustvl')
        model.push_to_hub(model_name, organization='hustvl')
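
# Example invocation (the script filename and checkpoint path are hypothetical):
#   python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre \
#       --checkpoint_path /path/to/yolos_s_200_pre.pth --pytorch_dump_folder_path ./yolos-small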
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--yolos_name',
default='yolos_s_200_pre',
type=str,
help=(
'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','
' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'
),
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 406 | 1 |
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup


logger = logging.get_logger(__name__)
class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    # Extracts node text and the corresponding xpaths from HTML strings.
    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)
    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child))
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts
    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq
    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f'/{tagname}'
            if subs != 0:
                xpath += f'[{subs}]'
        return xpath
    def __call__(self, html_strings) -> BatchFeature:
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f'but is of type {type(html_strings)}.')

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
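
# Minimal usage sketch (the HTML string is made up for illustration):
#   feature_extractor = MarkupLMFeatureExtractor()
#   encoding = feature_extractor("<html><body><p>Hello world</p></body></html>")
#   encoding["nodes"] == [["Hello world"]] and encoding["xpaths"] == [["/html/body/p"]]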
| 612 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n '.split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n '.split()

        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_ner_no_trainer(self):
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n '.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))
    @unittest.skip(reason="Fix me @muellerzr")
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n '.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))
    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))
    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n '.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))
    @slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n '.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n '.split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # The base model scores a 25% (random chance), so the fine-tuned model should do better.
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
| 612 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
"""RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ResNetForImageClassification""",
"""ResNetModel""",
"""ResNetPreTrainedModel""",
"""ResNetBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
"""TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFResNetForImageClassification""",
"""TFResNetModel""",
"""TFResNetPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
"""FlaxResNetForImageClassification""",
"""FlaxResNetModel""",
"""FlaxResNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 701 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300, max_position_embeddings=1024, encoder_layers=6, encoder_ffn_dim=1024, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, num_feature_levels=4, encoder_n_points=4, decoder_n_points=4, two_stage=False, two_stage_num_proposals=300, with_box_refine=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, disable_custom_kernels=False, **kwargs, ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
return output
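
# Minimal usage sketch: DeformableDetrConfig() builds the default configuration shown above;
# through attribute_map, config.num_attention_heads resolves to encoder_attention_heads and
# config.hidden_size resolves to d_model.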
| 646 | 0 |
"""simple docstring"""
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
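
# Worked example (the Project Euler 71 sample case): among fractions with denominator <= 8,
# the fraction immediately to the left of 3/7 is 2/5, so solution(3, 7, 8) == 2.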
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_00_00_00))
| 110 |
"""simple docstring"""
def apply_table(inp, table):
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    return data[1:] + data[0]


def xor(a, b):
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption: the same Feistel rounds with the subkeys applied in reverse order
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
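
    # Round-trip check: by the Feistel construction above, decrypting CT with the subkeys
    # in reverse order recovers the plaintext, so PT == message for any valid 10-bit key
    # and 8-bit message (e.g. key "1010000010", message "11010111").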
| 110 | 1 |
'''simple docstring'''
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str], current_sequence: list[int | str], index: int, index_used: list[int], ) -> None:
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False
sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
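
# Each call prints all n! orderings via backtracking; e.g. generate_all_permutations(["A", "B"])
# prints ['A', 'B'] and then ['B', 'A']. Time complexity is O(n * n!), space O(n) for recursion.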
| 162 |
'''simple docstring'''
def catalan(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)

    current_number = 1

    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1

    return current_number
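
# The loop implements the recurrence C_n = C_{n-1} * (4n - 2) / (n + 1). With this 1-based
# indexing the outputs run 1, 1, 2, 5, 14, ..., so for example catalan(5) == 14.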
if __name__ == "__main__":
import doctest
doctest.testmod()
| 162 | 1 |
"""simple docstring"""
def abbr(a: str, b: str) -> bool:
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
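
# Example: abbr("daBcd", "ABC") is True: delete the lowercase 'd's, capitalize 'a' and 'c',
# and keep 'B' to obtain "ABC". By contrast, abbr("dBcd", "ABC") is False.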
if __name__ == "__main__":
import doctest
doctest.testmod()
| 110 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue_model_parallelism.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding="utf-8", check=True, )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}", instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            }, metric_definitions=self.env.metric_definitions, distribution=distribution, py_version="py36", )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile) | 67 | 0 |
"""simple docstring"""
import math


class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning vector by Euclidean distance."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 > d1 else 1

    def update(self, weights: list[list[float]], sample: list[int], j: int, alpha: float) -> list[list[float]]:
        """Update the winning vector."""
        for i in range(len(weights)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
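
# The update implemented above is the classic Kohonen rule, w_j <- w_j + alpha * (x - w_j):
# only the winning cluster's weight vector is pulled toward the training sample.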
def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main() | 285 |
"""simple docstring"""
from typing import Any
def mode(input_list: list) -> list[Any]:
    """Return the mode(s) of the input list, sorted ascending."""
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
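
# Examples: mode([2, 2, 3]) returns [2]; mode([1, 2, 2, 3, 3]) returns [2, 3].
# All values tied for the highest count are returned, sorted ascending.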
if __name__ == "__main__":
import doctest
doctest.testmod() | 285 | 1 |
'''simple docstring'''
import numpy as np


def power_iteration(
    input_matrix: np.ndarray, vector: np.ndarray, error_tol: float = 1e-12, max_iterations: int = 100, ) -> tuple[float, np.ndarray]:
    """
    Power Iteration: find the largest eigenvalue and the corresponding eigenvector
    of `input_matrix`, starting from an arbitrary `vector` in the same space.
    """
    # Ensure matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector
def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector,
        # as they are only unique up to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
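
# Note: power iteration converges linearly at a rate of about |lambda_2 / lambda_1|, so matrices
# whose two largest eigenvalues are close in magnitude may need more than max_iterations steps.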
| 507 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
snake_case_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
snake_case_ = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
snake_case_ = {
"""unc-nlp/lxmert-base-uncased""": 512,
}
snake_case_ = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer
    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
def lowercase__ (self : int, __UpperCAmelCase : List[str], __UpperCAmelCase : List[Any]=None ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase__ (self : Optional[Any], __UpperCAmelCase : List[int], __UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = [self.sep_token_id]
SCREAMING_SNAKE_CASE : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase__ (self : Union[str, Any], __UpperCAmelCase : str, __UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self._tokenizer.model.save(__UpperCAmelCase, name=__UpperCAmelCase )
return tuple(__UpperCAmelCase )
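# A small sketch of the segment-id layout produced by
# create_token_type_ids_from_sequences above, assuming token_ids_0/token_ids_1
# are already-converted wordpiece ids (names illustrative): [CLS] A [SEP] gets
# segment id 0 and B [SEP] gets segment id 1.
def _segment_ids_sketch(token_ids_0, token_ids_1=None):
    cls_and_sep = 2  # one [CLS] plus one [SEP] around the first sequence
    if token_ids_1 is None:
        return (len(token_ids_0) + cls_and_sep) * [0]
    return (len(token_ids_0) + cls_and_sep) * [0] + (len(token_ids_1) + 1) * [1]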
| 507 | 1 |
'''simple docstring'''
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def UpperCAmelCase ( lowercase__ : Optional[int]=32 , lowercase__ : Any=10 , lowercase__ : Dict=100 , lowercase__ : int=1026 , lowercase__ : List[str]=True , lowercase__ : Dict="data/tokenized_stories_train_wikitext103.jbl" , lowercase__ : int="igf_context_pairs.jbl" , ):
'''simple docstring'''
set_seed(3 )
# generate train_data and objective_set
a__ = generate_datasets(
a_ , a_ , number=a_ , min_len=1026 , trim=a_ )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
a__ = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
# load pretrained model
a__ = load_gpta("""gpt2""" ).to(a_ )
print("""computing perplexity on objective set""" )
a__ = compute_perplexity(a_ , a_ , a_ ).item()
print("""perplexity on objective set:""" , a_ )
# collect igf pairs and save to file demo.jbl
collect_objective_set(a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def UpperCAmelCase ( lowercase__ : Optional[Any] , lowercase__ : List[Any]=15 , lowercase__ : List[Any]=128 , lowercase__ : Optional[Any]=100 , lowercase__ : Tuple="igf_model.pt" , ):
'''simple docstring'''
set_seed(42 )
# Load pre-trained model
a__ = GPTaLMHeadModel.from_pretrained("""gpt2""" )
# Initialize secondary learner to use embedding weights of model
a__ = SecondaryLearner(a_ )
# Train secondary learner
a__ = train_secondary_learner(
a_ , a_ , max_epochs=a_ , batch_size=a_ , eval_freq=100 , igf_model_path=a_ , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def UpperCAmelCase ( lowercase__ : Union[str, Any] , lowercase__ : int , lowercase__ : Optional[Any] , lowercase__ : List[Any]=32 , lowercase__ : Tuple=1000 , lowercase__ : List[Any]=16 , lowercase__ : List[str]=1.0 , lowercase__ : Tuple=recopy_gpta , lowercase__ : Tuple=None , lowercase__ : List[Any]=10 , lowercase__ : str="gpt2_finetuned.pt" , ):
'''simple docstring'''
a__ = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
a__ = RandomSampler(a_ )
a__ = DataLoader(a_ , sampler=a_ )
a__ = max_steps // (len(a_ )) + 1
a__ = 0
a__ = torch.zeros((1, context_len) , dtype=torch.long , device=a_ )
a__ = recopy_model(a_ , a_ , a_ )
model.train()
if secondary_learner is not None:
secondary_learner.to(a_ )
secondary_learner.eval()
a__ = []
a__ = 0
a__ = []
a__ = []
# Compute the performance of the transformer model at the beginning
a__ = compute_perplexity(a_ , a_ , a_ )
test_perps.append(a_ )
print("""Test perplexity, step""" , a_ , """:""" , a_ )
for epoch in range(int(a_ ) ):
for step, example in enumerate(a_ ):
torch.cuda.empty_cache()
a__ = random.randint(0 , example.size(2 ) - context_len - 1 )
a__ = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
a__ = model(a_ , labels=a_ )
a__ = True
if secondary_learner is not None:
a__ = secondary_learner.forward(
torch.tensor(a_ , dtype=torch.long , device=a_ ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(a_ ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
a__ = -1
if predicted_q < threshold:
a__ = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
a__ = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
a__ = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
a__ = compute_perplexity(a_ , a_ , a_ )
test_perps.append(a_ )
print("""Test perplexity, step""" , a_ , """:""" , a_ )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , a_ )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
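# A sketch of the decaying IG(X) threshold described in the comments above:
# interpolate linearly from one standard deviation above the mean down to one
# below it over the first `decay_steps` batches, then hold. Names are
# illustrative; the loop above uses a simpler hard switch at step 10.
def _igf_threshold_sketch(q_mean, q_std, global_step, decay_steps=10):
    progress = min(global_step / decay_steps, 1.0)
    # progress 0.0 -> mean + 1 std (strict filter); 1.0 -> mean - 1 std (permissive)
    return q_mean + (1.0 - 2.0 * progress) * q_std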
def UpperCAmelCase ( ):
'''simple docstring'''
a__ = argparse.ArgumentParser(description="""Fine-tune a transformer model with IGF on a language modeling task""" )
# Required parameters
parser.add_argument(
"""--data_dir""" , default=a_ , type=a_ , required=a_ , help="""The input data dir. Should contain data files for WikiText.""" , )
parser.add_argument(
"""--model_name_or_path""" , default=a_ , type=a_ , required=a_ , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--data_file""" , type=a_ , default=a_ , help=(
"""A jbl file containing tokenized data which can be split as objective dataset, """
"""train_dataset and test_dataset."""
) , )
parser.add_argument(
"""--igf_data_file""" , type=a_ , default=a_ , help="""A jbl file containing the context and information gain pairs to train secondary learner.""" , )
parser.add_argument(
"""--output_dir""" , default=a_ , type=a_ , required=a_ , help="""The output directory where the final fine-tuned model is stored.""" , )
parser.add_argument(
"""--tokenizer_name""" , default=a_ , type=a_ , help="""Pretrained tokenizer name or path if not the same as model_name""" , )
parser.add_argument("""--seed""" , type=a_ , default=a_ , help="""A seed for reproducible training.""" )
parser.add_argument(
"""--context_len""" , default=32 , type=a_ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--size_objective_set""" , default=100 , type=a_ , help="""number of articles that are long enough to be used as our objective set""" , )
parser.add_argument(
"""--eval_freq""" , default=100 , type=a_ , help="""secondary model evaluation is triggered at eval_freq""" )
parser.add_argument("""--max_steps""" , default=1000 , type=a_ , help="""To calculate training epochs""" )
parser.add_argument(
"""--secondary_learner_batch_size""" , default=128 , type=a_ , help="""batch size of training data for secondary learner""" , )
parser.add_argument(
"""--batch_size""" , default=16 , type=a_ , help="""batch size of training data of language model(gpt2) """ )
parser.add_argument(
"""--eval_interval""" , default=10 , type=a_ , help=(
"""decay the selectivity of our secondary learner filter from"""
"""1 standard deviation above average to 1 below average after 10 batches"""
) , )
parser.add_argument(
"""--number""" , default=100 , type=a_ , help="""The number of examples split to be used as objective_set/test_data""" )
parser.add_argument(
"""--min_len""" , default=1026 , type=a_ , help="""The minimum length of the article to be used as objective set""" )
parser.add_argument(
"""--secondary_learner_max_epochs""" , default=15 , type=a_ , help="""number of epochs to train secondary learner""" )
parser.add_argument("""--trim""" , default=a_ , type=a_ , help="""truncate the example if it exceeds context length""" )
parser.add_argument(
"""--threshold""" , default=1.0 , type=a_ , help=(
"""The threshold value used by secondary learner to filter the train_data and allow only"""
""" informative data as input to the model"""
) , )
parser.add_argument("""--finetuned_model_name""" , default="""gpt2_finetuned.pt""" , type=a_ , help="""finetuned_model_name""" )
parser.add_argument(
"""--recopy_model""" , default=a_ , type=a_ , help="""Reset the model to the original pretrained GPT-2 weights after each iteration""" , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=a_ , data_file="""data/tokenized_stories_train_wikitext103.jbl""" , igf_data_file="""igf_context_pairs.jbl""" , )
# Load train data for secondary learner
a__ = joblib.load("""data/IGF_values.jbl""" )
# Train secondary learner
a__ = training_secondary_learner(
a_ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="""igf_model.pt""" , )
# load pretrained gpt2 model
a__ = GPTaLMHeadModel.from_pretrained("""gpt2""" )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
a__ = generate_datasets(
context_len=32 , file="""data/tokenized_stories_train_wikitext103.jbl""" , number=100 , min_len=1026 , trim=a_ )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
a_ , a_ , a_ , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=a_ , secondary_learner=a_ , eval_interval=10 , finetuned_model_name="""gpt2_finetuned.pt""" , )
if __name__ == "__main__":
main()
| 705 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_lowercase : int =16
_lowercase : int =32
def UpperCAmelCase ( lowercase__ : Accelerator , lowercase__ : int = 16 , lowercase__ : str = "bert-base-cased" ):
'''simple docstring'''
a__ = AutoTokenizer.from_pretrained(lowercase__ )
a__ = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowercase__ : int ):
# max_length=None => use the model max length (it's actually the default)
a__ = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
a__ = datasets.map(
lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=lowercase__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
a__ = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase__ : Tuple ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(lowercase__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
a__ = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
a__ = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
return train_dataloader, eval_dataloader
def UpperCAmelCase ( lowercase__ : Union[str, Any] , lowercase__ : List[Any] ):
'''simple docstring'''
a__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
a__ = config["""lr"""]
a__ = int(config["""num_epochs"""] )
a__ = int(config["""seed"""] )
a__ = int(config["""batch_size"""] )
a__ = args.model_name_or_path
set_seed(lowercase__ )
a__ , a__ = get_dataloaders(lowercase__ , lowercase__ , lowercase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
a__ = AutoModelForSequenceClassification.from_pretrained(lowercase__ , return_dict=lowercase__ )
# Instantiate optimizer
a__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
a__ = optimizer_cls(params=model.parameters() , lr=lowercase__ )
if accelerator.state.deepspeed_plugin is not None:
a__ = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
a__ = 1
a__ = (len(lowercase__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
a__ = get_linear_schedule_with_warmup(
optimizer=lowercase__ , num_warmup_steps=0 , num_training_steps=lowercase__ , )
else:
a__ = DummyScheduler(lowercase__ , total_num_steps=lowercase__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a__ , a__ , a__ , a__ , a__ = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# We need to keep track of how many total steps we have iterated over
a__ = 0
# We also need to keep track of the stating epoch so files are named properly
a__ = 0
# Now we train the model
a__ = evaluate.load("""glue""" , """mrpc""" )
a__ = 0
a__ = {}
for epoch in range(lowercase__ , lowercase__ ):
model.train()
for step, batch in enumerate(lowercase__ ):
a__ = model(**lowercase__ )
a__ = outputs.loss
a__ = loss / gradient_accumulation_steps
accelerator.backward(lowercase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
a__ = 0
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
a__ = model(**lowercase__ )
a__ = outputs.logits.argmax(dim=-1 )
            # It is slightly faster to call this once than multiple times
a__ , a__ = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowercase__ ) - 1:
a__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
a__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowercase__ , references=lowercase__ , )
a__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'epoch {epoch}:' , lowercase__ )
a__ = eval_metric["""accuracy"""]
if best_performance < eval_metric["accuracy"]:
a__ = eval_metric["""accuracy"""]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f'Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , """all_results.json""" ) , """w""" ) as f:
json.dump(lowercase__ , lowercase__ )
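# A sketch of the duplicate-trimming step used in the evaluation loop above:
# distributed samplers pad the last batch so every process receives a full
# batch, and the gathered predictions/references must be cut back to the true
# dataset length. Names are illustrative.
def _trim_gathered(predictions, references, dataset_len, samples_seen, is_last_batch):
    if is_last_batch:
        predictions = predictions[: dataset_len - samples_seen]
        references = references[: dataset_len - samples_seen]
    else:
        samples_seen += references.shape[0]
    return predictions, references, samples_seen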
def UpperCAmelCase ( ):
'''simple docstring'''
a__ = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=lowercase__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowercase__ , )
parser.add_argument(
"""--output_dir""" , type=lowercase__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--performance_lower_bound""" , type=lowercase__ , default=lowercase__ , help="""Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.""" , )
parser.add_argument(
"""--num_epochs""" , type=lowercase__ , default=3 , help="""Number of train epochs.""" , )
a__ = parser.parse_args()
a__ = {"""lr""": 2e-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(lowercase__ , lowercase__ )
if __name__ == "__main__":
main()
| 412 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__A = logging.get_logger(__name__)
__A = {
"google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class _A ( UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase : Tuple = 'bit'
lowerCamelCase : List[Any] = ['preactivation', 'bottleneck']
lowerCamelCase : List[Any] = ['SAME', 'VALID']
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]=3 , __SCREAMING_SNAKE_CASE : List[str]=64 , __SCREAMING_SNAKE_CASE : Tuple=[256, 512, 1024, 2048] , __SCREAMING_SNAKE_CASE : Optional[Any]=[3, 4, 6, 3] , __SCREAMING_SNAKE_CASE : Optional[Any]="preactivation" , __SCREAMING_SNAKE_CASE : int="relu" , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Tuple=32 , __SCREAMING_SNAKE_CASE : Tuple=0.0 , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=32 , __SCREAMING_SNAKE_CASE : List[str]=1 , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Tuple=None , **__SCREAMING_SNAKE_CASE : Tuple , ) -> List[str]:
super().__init__(**__SCREAMING_SNAKE_CASE )
if layer_type not in self.layer_types:
raise ValueError(f'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
__UpperCAmelCase =global_padding.upper()
else:
raise ValueError(f'''Padding strategy {global_padding} not supported''' )
__UpperCAmelCase =num_channels
__UpperCAmelCase =embedding_size
__UpperCAmelCase =hidden_sizes
__UpperCAmelCase =depths
__UpperCAmelCase =layer_type
__UpperCAmelCase =hidden_act
__UpperCAmelCase =global_padding
__UpperCAmelCase =num_groups
__UpperCAmelCase =drop_path_rate
__UpperCAmelCase =embedding_dynamic_padding
__UpperCAmelCase =output_stride
__UpperCAmelCase =width_factor
__UpperCAmelCase =["""stem"""] + [f'''stage{idx}''' for idx in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 )]
__UpperCAmelCase , __UpperCAmelCase =get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
| 68 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'weiweishi/roc-bert-base-zh': 'https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json',
}
class UpperCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : List[Any] = """roc_bert"""
def __init__( self , _SCREAMING_SNAKE_CASE=3_0_5_2_2 , _SCREAMING_SNAKE_CASE=7_6_8 , _SCREAMING_SNAKE_CASE=1_2 , _SCREAMING_SNAKE_CASE=1_2 , _SCREAMING_SNAKE_CASE=3_0_7_2 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_1_2 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=1E-12 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE="absolute" , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=7_6_8 , _SCREAMING_SNAKE_CASE=9_1_0 , _SCREAMING_SNAKE_CASE=5_1_2 , _SCREAMING_SNAKE_CASE=2_4_8_5_8 , _SCREAMING_SNAKE_CASE=True , **_SCREAMING_SNAKE_CASE , ) -> List[Any]:
a_ : Optional[int] = vocab_size
a_ : str = max_position_embeddings
a_ : List[Any] = hidden_size
a_ : Optional[Any] = num_hidden_layers
a_ : Union[str, Any] = num_attention_heads
a_ : List[str] = intermediate_size
a_ : List[Any] = hidden_act
a_ : str = hidden_dropout_prob
a_ : Union[str, Any] = attention_probs_dropout_prob
a_ : Any = initializer_range
a_ : str = type_vocab_size
a_ : Union[str, Any] = layer_norm_eps
a_ : str = use_cache
a_ : Tuple = enable_pronunciation
a_ : Dict = enable_shape
a_ : int = pronunciation_embed_dim
a_ : List[Any] = pronunciation_vocab_size
a_ : int = shape_embed_dim
a_ : List[str] = shape_vocab_size
a_ : List[Any] = concat_input
a_ : List[str] = position_embedding_type
a_ : Any = classifier_dropout
super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
| 473 | 0 |
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class _a ( lowerCamelCase_ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = 'microsoft/speecht5_tts'
__SCREAMING_SNAKE_CASE = (
'This is a tool that reads an English text out loud. It takes an input named `text` which should contain the '
'text to read (in English) and returns a waveform object containing the sound.'
)
__SCREAMING_SNAKE_CASE = 'text_reader'
__SCREAMING_SNAKE_CASE = SpeechTaProcessor
__SCREAMING_SNAKE_CASE = SpeechTaForTextToSpeech
__SCREAMING_SNAKE_CASE = SpeechTaHifiGan
__SCREAMING_SNAKE_CASE = ['text']
__SCREAMING_SNAKE_CASE = ['audio']
    def setup( self ):
        if self.post_processor is None:
            self.post_processor = 'microsoft/speecht5_hifigan'
        super().setup()
    def encode( self , text , speaker_embeddings=None ):
        inputs = self.pre_processor(text=text , return_tensors="pt" , truncation=True )
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings." )
            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors" , split="validation" )
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"] ).unsqueeze(0 )
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
    def forward( self , inputs ):
        with torch.no_grad():
            return self.model.generate_speech(**inputs )
    def decode( self , outputs ):
        with torch.no_grad():
            return self.post_processor(outputs ).cpu().detach()
| 704 | from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
lowerCAmelCase__ = logging.get_logger(__name__)
class _a ( lowerCamelCase_ ):
"""simple docstring"""
    def _parse_labels( self , labels ):
        if isinstance(labels , str ):
            labels = [label.strip() for label in labels.split("," ) if label.strip()]
        return labels
    def __call__( self , sequences , labels , hypothesis_template ):
        if len(labels ) == 0 or len(sequences ) == 0:
            raise ValueError("You must include at least one label and at least one sequence." )
        if hypothesis_template.format(labels[0] ) == hypothesis_template:
            raise ValueError(
                (
                    "The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. "
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template ) )
        if isinstance(sequences , str ):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label )] for label in labels] )
        return sequence_pairs, sequences
@add_end_docstrings(lowerCamelCase_ )
class _a ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_=ZeroShotClassificationArgumentHandler() , *lowerCAmelCase_ , **lowerCAmelCase_ ):
_lowercase =args_parser
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
if self.entailment_id == -1:
logger.warning(
"Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
"-1. Define a descriptive label2id mapping in the model config to ensure correct outputs." )
@property
def __lowerCAmelCase ( self ):
        for label, ind in self.model.config.label2id.items():
if label.lower().startswith("entail" ):
return ind
return -1
def __lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=TruncationStrategy.ONLY_FIRST , **lowerCAmelCase_ ):
_lowercase =self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
" `pad_token=eos_token`" )
_lowercase =self.tokenizer.eos_token
try:
_lowercase =self.tokenizer(
lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , )
except Exception as e:
if "too short" in str(lowerCAmelCase_ ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
_lowercase =self.tokenizer(
lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def __lowerCAmelCase ( self , **lowerCAmelCase_ ):
if kwargs.get("multi_class" , lowerCAmelCase_ ) is not None:
_lowercase =kwargs["multi_class"]
logger.warning(
"The `multi_class` argument has been deprecated and renamed to `multi_label`. "
"`multi_class` will be removed in a future version of Transformers." )
_lowercase ={}
if "candidate_labels" in kwargs:
_lowercase =self._args_parser._parse_labels(kwargs["candidate_labels"] )
if "hypothesis_template" in kwargs:
_lowercase =kwargs["hypothesis_template"]
_lowercase ={}
if "multi_label" in kwargs:
_lowercase =kwargs["multi_label"]
return preprocess_params, {}, postprocess_params
def __call__( self , lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ , ):
if len(lowerCAmelCase_ ) == 0:
pass
elif len(lowerCAmelCase_ ) == 1 and "candidate_labels" not in kwargs:
_lowercase =args[0]
else:
raise ValueError(F'''Unable to understand extra arguments {args}''' )
return super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_="This example is {}." ):
_lowercase , _lowercase =self._args_parser(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
for i, (candidate_label, sequence_pair) in enumerate(zip(lowerCAmelCase_ , lowerCAmelCase_ ) ):
_lowercase =self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(lowerCAmelCase_ ) - 1,
**model_input,
}
def __lowerCAmelCase ( self , lowerCAmelCase_ ):
_lowercase =inputs["candidate_label"]
_lowercase =inputs["sequence"]
_lowercase ={k: inputs[k] for k in self.tokenizer.model_input_names}
_lowercase =self.model(**lowerCAmelCase_ )
_lowercase ={
"candidate_label": candidate_label,
"sequence": sequence,
"is_last": inputs["is_last"],
**outputs,
}
return model_outputs
def __lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=False ):
_lowercase =[outputs["candidate_label"] for outputs in model_outputs]
_lowercase =[outputs["sequence"] for outputs in model_outputs]
_lowercase =np.concatenate([output["logits"].numpy() for output in model_outputs] )
_lowercase =logits.shape[0]
_lowercase =len(lowerCAmelCase_ )
_lowercase =N // n
_lowercase =logits.reshape((num_sequences, n, -1) )
if multi_label or len(lowerCAmelCase_ ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
_lowercase =self.entailment_id
_lowercase =-1 if entailment_id == 0 else 0
_lowercase =reshaped_outputs[..., [contradiction_id, entailment_id]]
_lowercase =np.exp(lowerCAmelCase_ ) / np.exp(lowerCAmelCase_ ).sum(-1 , keepdims=lowerCAmelCase_ )
_lowercase =scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
_lowercase =reshaped_outputs[..., self.entailment_id]
_lowercase =np.exp(lowerCAmelCase_ ) / np.exp(lowerCAmelCase_ ).sum(-1 , keepdims=lowerCAmelCase_ )
_lowercase =list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
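# A numeric sketch of the two scoring modes implemented in the postprocess
# step above, on (num_labels, 2) arrays of [contradiction, entailment] logits;
# the values and names are illustrative.
def _zero_shot_scores_sketch(entail_contra_logits, multi_label):
    logits = np.asarray(entail_contra_logits, dtype=float)
    if multi_label or logits.shape[0] == 1:
        # softmax over (contradiction, entailment) independently per label
        e = np.exp(logits - logits.max(axis=-1, keepdims=True))
        return (e / e.sum(axis=-1, keepdims=True))[:, 1]
    # single-label: softmax of the entailment logits across all candidate labels
    e = np.exp(logits[:, 1] - logits[:, 1].max())
    return e / e.sum()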
| 594 | 0 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
lowercase : int = {'tokenization_tapex': ['TapexTokenizer']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
lowercase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 302 | """simple docstring"""
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    def __init__( self , img , dst_width , dst_height ):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("""Destination width/height should be > 0""" )
        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h
        self.output = (
            np.ones((self.dst_h, self.dst_w, 3) , np.uint8 ) * 255
        )
    def process( self ):
        for i in range(self.dst_h ):
            for j in range(self.dst_w ):
                self.output[i][j] = self.img[self.get_y(i )][self.get_x(j )]
    def get_x( self , x ):
        return int(self.ratio_x * x )
    def get_y( self , y ):
        return int(self.ratio_y * y )
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread('image_data/lena.jpg', 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()
    imshow(
        F"""Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}""", n.output
    )
    waitKey(0)
    destroyAllWindows()
| 213 | 0 |
'''simple docstring'''
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"The RoBERTa Model transformer with early exiting (DeeRoBERTa). " ,SCREAMING_SNAKE_CASE ,)
class _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__a : List[Any] = RobertaConfig
__a : Union[str, Any] = "roberta"
def __init__( self : Union[str, Any] , lowercase : List[str] ) -> Optional[Any]:
'''simple docstring'''
super().__init__(lowercase )
UpperCamelCase__ = RobertaEmbeddings(lowercase )
self.init_weights()
@add_start_docstrings(
"RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. " ,SCREAMING_SNAKE_CASE ,)
class _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__a : Dict = RobertaConfig
__a : Tuple = "roberta"
def __init__( self : Union[str, Any] , lowercase : Optional[Any] ) -> List[str]:
'''simple docstring'''
super().__init__(lowercase )
UpperCamelCase__ = config.num_labels
UpperCamelCase__ = config.num_hidden_layers
UpperCamelCase__ = DeeRobertaModel(lowercase )
UpperCamelCase__ = nn.Dropout(config.hidden_dropout_prob )
UpperCamelCase__ = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(lowercase )
def A ( self : Optional[Any] , lowercase : str=None , lowercase : Tuple=None , lowercase : List[str]=None , lowercase : Union[str, Any]=None , lowercase : int=None , lowercase : Dict=None , lowercase : Optional[int]=None , lowercase : Optional[Any]=-1 , lowercase : Optional[Any]=False , ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = self.num_layers
try:
UpperCamelCase__ = self.roberta(
lowercase , attention_mask=lowercase , token_type_ids=lowercase , position_ids=lowercase , head_mask=lowercase , inputs_embeds=lowercase , )
UpperCamelCase__ = outputs[1]
UpperCamelCase__ = self.dropout(lowercase )
UpperCamelCase__ = self.classifier(lowercase )
UpperCamelCase__ = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
UpperCamelCase__ = e.message
UpperCamelCase__ = e.exit_layer
UpperCamelCase__ = outputs[0]
if not self.training:
UpperCamelCase__ = entropy(lowercase )
UpperCamelCase__ = []
UpperCamelCase__ = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
UpperCamelCase__ = MSELoss()
UpperCamelCase__ = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
UpperCamelCase__ = CrossEntropyLoss()
UpperCamelCase__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
UpperCamelCase__ = []
for highway_exit in outputs[-1]:
UpperCamelCase__ = highway_exit[0]
if not self.training:
highway_logits_all.append(lowercase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
UpperCamelCase__ = MSELoss()
UpperCamelCase__ = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
UpperCamelCase__ = CrossEntropyLoss()
UpperCamelCase__ = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(lowercase )
if train_highway:
UpperCamelCase__ = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
UpperCamelCase__ = (loss,) + outputs
if not self.training:
UpperCamelCase__ = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
UpperCamelCase__ = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
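# A sketch of the entropy-based early-exit rule the highway branches rely on
# (the actual check lives in modeling_highway_bert and raises
# HighwayException); the threshold and names here are illustrative.
import torch
def _should_exit_early_sketch(highway_logits, entropy_threshold=0.5):
    probs = torch.softmax(highway_logits, dim=-1)
    layer_entropy = -(probs * torch.log(probs + 1e-12)).sum(dim=-1)
    # Exit as soon as the intermediate classifier is confident (low entropy).
    return bool((layer_entropy < entropy_threshold).all()), layer_entropy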
| 265 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__a : int = "microsoft/speecht5_tts"
__a : int = (
"This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
"text to read (in English) and returns a waveform object containing the sound."
)
__a : Optional[Any] = "text_reader"
__a : Tuple = SpeechTaProcessor
__a : int = SpeechTaForTextToSpeech
__a : int = SpeechTaHifiGan
__a : Optional[int] = ["text"]
__a : Union[str, Any] = ["audio"]
    def setup( self ):
        '''simple docstring'''
        if self.post_processor is None:
            self.post_processor = """microsoft/speecht5_hifigan"""
        super().setup()
    def encode( self , text , speaker_embeddings=None ):
        '''simple docstring'''
        inputs = self.pre_processor(text=text , return_tensors="""pt""" , truncation=True )
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" )
            embeddings_dataset = load_dataset("""Matthijs/cmu-arctic-xvectors""" , split="""validation""" )
            speaker_embeddings = torch.tensor(embeddings_dataset[7_3_0_5]["""xvector"""] ).unsqueeze(0 )
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
    def forward( self , inputs ):
        '''simple docstring'''
        with torch.no_grad():
            return self.model.generate_speech(**inputs )
    def decode( self , outputs ):
        '''simple docstring'''
        with torch.no_grad():
            return self.post_processor(outputs ).cpu().detach()
| 265 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import pi
# Define the reduced Planck constant ℏ (h-bar), the speed of light c,
# the value of pi, and the Casimir force function
REDUCED_PLANCK_CONSTANT = 1.054_571_817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1
def casimir_force(force: float , area: float , distance: float) -> dict[str, float]:
    """simple docstring"""
    if (force, area, distance).count(0) != 1:
        raise ValueError('''One and only one argument must be 0''')
    if force < 0:
        raise ValueError('''Magnitude of force can not be negative''')
    if distance < 0:
        raise ValueError('''Distance can not be negative''')
    if area < 0:
        raise ValueError('''Area can not be negative''')
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError('''One and only one argument must be 0''')
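# Illustrative use of casimir_force above: zero out exactly one of the three
# quantities and the function solves for it (the numbers are chosen arbitrarily).
def _casimir_demo():
    # attractive force between two 4 m^2 plates held 5 mm apart
    return casimir_force(force=0, area=4.0, distance=0.005)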
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 11 |
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime( number : int ) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator() -> Iterator[int]:
    """simple docstring"""
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution( n : int = 2_000_000 ) -> int:
    """simple docstring"""
    return sum(takewhile(lambda x : x < n , prime_generator() ) )
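# Quick illustrative check of the helpers above: the primes below 10 are
# 2, 3, 5 and 7, so their sum is 17.
def _primes_demo():
    assert is_prime(7) and not is_prime(9)
    assert solution(10) == 17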
if __name__ == "__main__":
print(F'''{solution() = }''')
| 178 | 0 |
'''simple docstring'''
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class __snake_case( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ) -> List[str]:
debug_launcher(test_script.main )
def __snake_case ( self ) -> Optional[int]:
debug_launcher(test_ops.main ) | 344 |
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __snake_case( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ) -> Tuple:
lowerCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
lowerCAmelCase = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(A_ )
lowerCAmelCase = -1
lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A_ )
lowerCAmelCase = model.generate(A_ , max_new_tokens=10 , do_sample=A_ )
lowerCAmelCase = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowerCAmelCase = TextStreamer(A_ )
model.generate(A_ , max_new_tokens=10 , do_sample=A_ , streamer=A_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCAmelCase = cs.out[:-1]
self.assertEqual(A_ , A_ )
def __snake_case ( self ) -> List[Any]:
lowerCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
lowerCAmelCase = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(A_ )
lowerCAmelCase = -1
lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A_ )
lowerCAmelCase = model.generate(A_ , max_new_tokens=10 , do_sample=A_ )
lowerCAmelCase = tokenizer.decode(greedy_ids[0] )
lowerCAmelCase = TextIteratorStreamer(A_ )
lowerCAmelCase = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
lowerCAmelCase = Thread(target=model.generate , kwargs=A_ )
thread.start()
lowerCAmelCase = """"""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(A_ , A_ )
def __snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
lowerCAmelCase = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(A_ )
lowerCAmelCase = -1
lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A_ )
lowerCAmelCase = model.generate(A_ , max_new_tokens=10 , do_sample=A_ )
lowerCAmelCase = greedy_ids[:, input_ids.shape[1] :]
lowerCAmelCase = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowerCAmelCase = TextStreamer(A_ , skip_prompt=A_ )
model.generate(A_ , max_new_tokens=10 , do_sample=A_ , streamer=A_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCAmelCase = cs.out[:-1]
self.assertEqual(A_ , A_ )
def __snake_case ( self ) -> int:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
lowerCAmelCase = AutoTokenizer.from_pretrained("""distilgpt2""" )
lowerCAmelCase = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(A_ )
lowerCAmelCase = -1
lowerCAmelCase = torch.ones((1, 5) , device=A_ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowerCAmelCase = TextStreamer(A_ , skip_special_tokens=A_ )
model.generate(A_ , max_new_tokens=1 , do_sample=A_ , streamer=A_ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowerCAmelCase = cs.out[:-1] # Remove the final "\n"
lowerCAmelCase = tokenizer(A_ , return_tensors="""pt""" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __snake_case ( self ) -> Tuple:
lowerCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
lowerCAmelCase = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(A_ )
lowerCAmelCase = -1
lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A_ )
lowerCAmelCase = TextIteratorStreamer(A_ , timeout=0.0_0_1 )
lowerCAmelCase = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
lowerCAmelCase = Thread(target=model.generate , kwargs=A_ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(A_ ):
lowerCAmelCase = """"""
for new_text in streamer:
streamer_text += new_text | 344 | 1 |
from __future__ import annotations
def generate_sum_of_subsets_soln(nums , max_sum):
    result = []
    path = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums , max_sum , num_index , path , result , remaining_nums_sum)
    return result
def create_state_space_tree(nums , max_sum , num_index , path , result , remaining_nums_sum , ):
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index , len(nums)):
        create_state_space_tree(
            nums , max_sum , index + 1 , [*path, nums[index]] , result , remaining_nums_sum - nums[index] , )
nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 73 |
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int=1_3 , UpperCAmelCase__ : Optional[Any]=7 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Optional[int]=False , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Optional[int]=9_9 , UpperCAmelCase__ : Dict=3_2 , UpperCAmelCase__ : List[str]=5 , UpperCAmelCase__ : Optional[int]=4 , UpperCAmelCase__ : Union[str, Any]=3_7 , UpperCAmelCase__ : Tuple="gelu" , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : int=5_1_2 , UpperCAmelCase__ : List[str]=1_6 , UpperCAmelCase__ : Optional[Any]=2 , UpperCAmelCase__ : List[Any]=0.02 , UpperCAmelCase__ : List[str]=3 , UpperCAmelCase__ : str=4 , UpperCAmelCase__ : List[Any]=None , ) -> Any:
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = seq_length
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_input_mask
__SCREAMING_SNAKE_CASE = use_token_type_ids
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = num_labels
__SCREAMING_SNAKE_CASE = num_choices
__SCREAMING_SNAKE_CASE = scope
def UpperCAmelCase_ ( self : int ) -> Dict:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
__SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , )
def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = BioGptModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , ) -> List[Any]:
__SCREAMING_SNAKE_CASE = BioGptForCausalLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , *UpperCAmelCase__ : Optional[Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE = BioGptModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
# create attention mask
__SCREAMING_SNAKE_CASE = torch.ones(input_ids.shape , dtype=torch.long , device=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = self.seq_length // 2
__SCREAMING_SNAKE_CASE = 0
# first forward pass
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ ).to_tuple()
# create hypothetical next token and extent to next_input_ids
__SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
__SCREAMING_SNAKE_CASE = ids_tensor((1,) , UpperCAmelCase__ ).item() + 1
__SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
__SCREAMING_SNAKE_CASE = random_other_next_tokens
# append to next input_ids and attn_mask
__SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 )
__SCREAMING_SNAKE_CASE = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=UpperCAmelCase__ )] , dim=1 , )
# get two different outputs
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )["last_hidden_state"]
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )["last_hidden_state"]
# select random slice
__SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__SCREAMING_SNAKE_CASE = output_from_no_past[:, -1, random_slice_idx].detach()
__SCREAMING_SNAKE_CASE = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , *UpperCAmelCase__ : Optional[Any] ) -> List[Any]:
__SCREAMING_SNAKE_CASE = BioGptModel(config=UpperCAmelCase__ ).to(UpperCAmelCase__ ).eval()
__SCREAMING_SNAKE_CASE = torch.ones(input_ids.shape , dtype=torch.long , device=UpperCAmelCase__ )
# first forward pass
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
__SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size )
__SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and attention mask
__SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 )
__SCREAMING_SNAKE_CASE = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )["last_hidden_state"]
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ )[
"last_hidden_state"
]
# select random slice
__SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach()
__SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , *UpperCAmelCase__ : Any , UpperCAmelCase__ : int=False ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = BioGptForCausalLM(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : str , *UpperCAmelCase__ : Optional[int] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = BioGptModel(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
    def UpperCAmelCase_ ( self : Optional[Any] , config : Optional[int] , input_ids : List[Any] , input_mask : int , head_mask : List[str] , token_type_ids : Optional[Any] , *args : Dict ) -> Union[str, Any]:
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def UpperCAmelCase_ ( self : Optional[Any] ) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class UpperCamelCase_ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]:
        self.model_tester = BioGptModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BioGptConfig , hidden_size=3_7 )
    def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
        self.config_tester.run_common_tests()
    def UpperCAmelCase_ ( self : List[str] ) -> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def UpperCAmelCase_ ( self : int ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def UpperCAmelCase_ ( self : List[Any] ) -> Any:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs )
    def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs , gradient_checkpointing=True )
    def UpperCAmelCase_ ( self : Any ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs )
    def UpperCAmelCase_ ( self : Tuple ) -> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs )
    def UpperCAmelCase_ ( self : Dict ) -> Any:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs )
@slow
def UpperCAmelCase_ ( self : int ) -> List[str]:
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
        model.to(torch_device )
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
        # decoder-only models are left-padded for batched generation so that the
        # last position of every row is a real token rather than padding
        tokenizer.padding_side = "left"
        # Define PAD Token = EOS Token
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id
        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]
        inputs = tokenizer(sentences , return_tensors="pt" , padding=True )
        input_ids = inputs["input_ids"].to(torch_device )
        outputs = model.generate(
            input_ids=input_ids , attention_mask=inputs["attention_mask"].to(torch_device ) , )
        inputs_non_padded = tokenizer(sentences[0] , return_tensors="pt" ).input_ids.to(torch_device )
        output_non_padded = model.generate(input_ids=inputs_non_padded )
        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1] , return_tensors="pt" ).input_ids.to(torch_device )
        output_padded = model.generate(input_ids=inputs_padded , max_length=model.config.max_length - num_paddings )
        batch_out_sentence = tokenizer.batch_decode(outputs , skip_special_tokens=True )
        non_padded_sentence = tokenizer.decode(output_non_padded[0] , skip_special_tokens=True )
        padded_sentence = tokenizer.decode(output_padded[0] , skip_special_tokens=True )
        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence , batch_out_sentence )
        self.assertListEqual(expected_output_sentence , [non_padded_sentence, padded_sentence] )
@slow
def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]:
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def UpperCAmelCase_ ( self : Dict ) -> Dict:
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = BioGptForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def UpperCAmelCase_ ( self : List[Any] ) -> str:
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        model = BioGptForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class UpperCamelCase_ ( unittest.TestCase):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self : int ) -> List[Any]:
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
        input_ids = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]] )
        output = model(input_ids )[0]
        vocab_size = 4_2_3_8_4
        expected_shape = torch.Size((1, 5, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-9.5_236, -9.8_918, 10.4_557], [-11.0_469, -9.6_423, 8.1_022], [-8.8_664, -7.8_826, 5.5_325]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
@slow
def UpperCAmelCase_ ( self : Union[str, Any] ) -> int:
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
        model.to(torch_device )
        torch.manual_seed(0 )
        tokenized = tokenizer("COVID-19 is" , return_tensors="pt" ).to(torch_device )
        output_ids = model.generate(
            **tokenized , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=True , )
        output_str = tokenizer.decode(output_ids[0] , skip_special_tokens=True )
        EXPECTED_OUTPUT_STR = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str , EXPECTED_OUTPUT_STR )
| 682 | 0 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def __UpperCamelCase ( ode_func : Callable , ya : float , xa : float , step_size : float , x_end : float ) -> np.array:
    """simple docstring"""
    n = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        # predictor step (explicit Euler)
        y[k + 1] = y[k] + step_size * ode_func(x , y[k] )
        # corrector step (trapezoidal rule, using the predicted value)
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y[k + 1] ))
        )
        x += step_size
    return y
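# A minimal usage sketch (assumption: the function above implements Heun's
# predictor-corrector method): solving y' = y on [0, 1] with y(0) = 1 and a
# step of 0.01 gives an endpoint close to the exact value np.exp(1) ~ 2.71828:
#   y = __UpperCamelCase(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
#   print(y[-1])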
if __name__ == "__main__":
import doctest
doctest.testmod()
| 719 |
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
_lowerCAmelCase : List[Any] = logging.getLogger(__name__)
def __UpperCamelCase ( ) -> Any:
"""simple docstring"""
lowerCAmelCase : str = argparse.ArgumentParser(
description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' )
parser.add_argument(
'--dataset_name' , type=_A , default='wikitext' , help='Name of the training. Explore datasets at: hf.co/datasets.' , )
parser.add_argument(
'--dataset_config' , type=_A , default='wikitext-103-raw-v1' , help='Configuration name of the dataset.' )
parser.add_argument(
'--tokenizer_name_or_path' , type=_A , default='sayakpaul/unigram-tokenizer-wikitext' , help='Tokenizer identifier. Can be a local filepath or a Hub identifier.' , )
parser.add_argument(
'--shard_size' , type=_A , default=10_00 , help='Number of entries to go in a single shard.' , )
parser.add_argument('--split' , type=_A , default='train' , choices=['train', 'test', 'validation'] )
parser.add_argument(
'--limit' , default=_A , type=_A , help='Limit the number of shards (used for debugging).' , )
parser.add_argument(
'--max_length' , type=_A , default=5_12 , help='Maximum sequence length. For training on TPUs, it helps to have a maximum'
' sequence length that is a multiple of 8.' , )
parser.add_argument(
'--output_dir' , default='tf-tpu' , type=_A , help='Output directory where the TFRecord shards will be saved. If the'
' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'
' shards will be directly saved to a Google Cloud Storage bucket.' , )
lowerCAmelCase : Any = parser.parse_args()
return args
def __UpperCamelCase ( tokenizer : Optional[int] ) -> int:
    """simple docstring"""
    def fn(examples : Tuple ):
        return tokenizer(examples['text'] )
    return fn
def __UpperCamelCase ( tokenized_data : int ) -> int:
    """simple docstring"""
    records = []
    for i in range(len(tokenized_data['input_ids'] ) ):
        features = tf.train.Features(
            feature={
                'input_ids': tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data['input_ids'][i] ) ),
                'attention_mask': tf.train.Feature(
                    int64_list=tf.train.Int64List(value=tokenized_data['attention_mask'][i] ) ),
            } )
        example = tf.train.Example(features=features )
        records.append(example.SerializeToString() )
    return records
def __UpperCamelCase ( _A : int ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
lowerCAmelCase : Optional[Any] = min(len(_A ) , args.limit )
lowerCAmelCase : Dict = dataset.select(range(_A ) )
print(F"Limiting the dataset to {args.limit} entries." )
lowerCAmelCase : str = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
lowerCAmelCase : Any = os.path.join(args.output_dir , args.split )
if not os.path.exists(_A ):
os.makedirs(_A )
else:
lowerCAmelCase : List[Any] = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
lowerCAmelCase : Any = tokenize_function(_A )
lowerCAmelCase : Optional[int] = dataset.map(_A , batched=_A , num_proc=4 , remove_columns=['text'] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples : str ):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k] , [] ) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0 , total_length , args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result
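    # A toy illustration of the regrouping above (numbers are illustrative): with
    # args.max_length == 4, group_texts({"input_ids": [[1, 2, 3], [4, 5, 6, 7, 8]]})
    # returns {"input_ids": [[1, 2, 3, 4], [5, 6, 7, 8]]}; a 9th token would be
    # dropped as the remainder.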
lowerCAmelCase : List[Any] = dataset_tokenized.map(_A , batched=_A , batch_size=10_00 , num_proc=4 )
lowerCAmelCase : Union[str, Any] = 0
lowerCAmelCase : Tuple = 0
for shard in range(0 , len(_A ) , args.shard_size ):
lowerCAmelCase : Optional[Any] = grouped_dataset[shard : shard + args.shard_size]
lowerCAmelCase : List[str] = len(dataset_snapshot['input_ids'] )
lowerCAmelCase : Union[str, Any] = os.path.join(_A , F"dataset-{shard_count}-{records_containing}.tfrecord" )
lowerCAmelCase : List[Any] = get_serialized_examples(_A )
with tf.io.TFRecordWriter(_A ) as out_file:
for i in range(len(_A ) ):
lowerCAmelCase : Union[str, Any] = serialized_examples[i]
out_file.write(_A )
print('Wrote file {} containing {} records'.format(_A , _A ) )
shard_count += 1
total_records += records_containing
with open(F"split-{args.split}-records-count.txt" , 'w' ) as f:
print(F"Total {args.split} records: {total_records}" , file=_A )
if __name__ == "__main__":
_lowerCAmelCase : List[Any] = parse_args()
main(args)
| 646 | 0 |
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
__lowercase : Optional[Any] =datasets.logging.get_logger(__name__)
__lowercase : Optional[Any] ="""\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
"""
__lowercase : List[str] ="""\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project's README at https://github.com/google-research/bleurt#readme for more information.
"""
__lowercase : Optional[Any] ="""
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
'scores': List of scores.
Examples:
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> bleurt = datasets.load_metric(\"bleurt\")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results[\"scores\"]])
[1.03, 1.04]
"""
__lowercase : str ={
"""bleurt-tiny-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip""",
"""bleurt-tiny-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip""",
"""bleurt-base-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip""",
"""bleurt-base-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip""",
"""bleurt-large-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip""",
"""bleurt-large-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip""",
"""BLEURT-20-D3""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip""",
"""BLEURT-20-D6""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip""",
"""BLEURT-20-D12""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip""",
"""BLEURT-20""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip""",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
def lowerCAmelCase__ ( self: Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/google-research/bleurt" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/google-research/bleurt"] , reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"] , )
    def lowerCAmelCase__ ( self: Any , dl_manager: List[Any] ) -> Tuple:
        '''simple docstring'''
        # check that the config name specifies a valid BLEURT checkpoint
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')." )
            self.config_name = "bleurt-base-128"
        if self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                F'{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}' )
        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
        self.scorer = score.BleurtScorer(os.path.join(model_path , checkpoint_name ) )
    def lowerCAmelCase__ ( self: Tuple , predictions: List[Any] , references: Tuple ) -> Any:
        '''simple docstring'''
        scores = self.scorer.score(references=references , candidates=predictions )
        return {"scores": scores}
| 54 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_logger(__name__)
class a_ ( ModelMixin ):
    def __init__( self , controlnets ) -> Union[str, Any]:
        """simple docstring"""
        super().__init__()
        self.nets = nn.ModuleList(controlnets )
    def A_( self , sample , timestep , encoder_hidden_states , controlnet_cond , conditioning_scale , class_labels = None , timestep_cond = None , attention_mask = None , cross_attention_kwargs = None , guess_mode = False , return_dict = True , ) -> Union[ControlNetOutput, Tuple]:
        """simple docstring"""
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond , conditioning_scale , self.nets ) ):
            down_samples , mid_sample = controlnet(
                sample , timestep , encoder_hidden_states , image , scale , class_labels , timestep_cond , attention_mask , cross_attention_kwargs , guess_mode , return_dict , )
            # merge samples
            if i == 0:
                down_block_res_samples , mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples , down_samples )
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample
    def A_( self , save_directory , is_main_process = True , save_function = None , safe_serialization = False , variant = None , ) -> int:
        """simple docstring"""
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save , is_main_process=is_main_process , save_function=save_function , safe_serialization=safe_serialization , variant=variant , )
            idx += 1
            model_path_to_save = model_path_to_save + f'_{idx}'
    @classmethod
    def A_( cls , pretrained_model_path , **kwargs ) -> Tuple:
        """simple docstring"""
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load ):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load , **kwargs )
            controlnets.append(controlnet )
            idx += 1
            model_path_to_load = pretrained_model_path + f'_{idx}'
        logger.info(f'{len(controlnets )} controlnets loaded from {pretrained_model_path}.' )
        if len(controlnets ) == 0:
            raise ValueError(
                f'No ControlNets found under {os.path.dirname(pretrained_model_path )}. Expected at least {pretrained_model_path + "_0"}.' )
        return cls(controlnets )
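# A minimal usage sketch (paths are illustrative; `a_` is the multi-ControlNet
# wrapper class defined above, whose name follows this file's convention).
# Residuals from all wrapped nets are summed during the forward pass:
#   nets = [ControlNetModel.from_pretrained(p) for p in ("cn-canny", "cn-depth")]
#   multi = a_(nets)
#   multi.save_pretrained("multi-cn")   # writes multi-cn, multi-cn_1, ...
#   multi = a_.from_pretrained("multi-cn")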
| 205 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class __A :
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , projection_dim=0 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def __lowercase ( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        config = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def __lowercase ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = TFDPRContextEncoder(config=config )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
    def __lowercase ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = TFDPRQuestionEncoder(config=config )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
    def __lowercase ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = TFDPRReader(config=config )
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
    def __lowercase ( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids}
        return config, inputs_dict
@require_tf
class __A ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"""feature-extraction""": TFDPRQuestionEncoder} if is_tf_available() else {}
    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def __lowercase ( self ):
        """simple docstring"""
        self.model_tester = TFDPRModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DPRConfig , hidden_size=37 )
    def __lowercase ( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def __lowercase ( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs )
    def __lowercase ( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs )
    def __lowercase ( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs )
@slow
def __lowercase ( self ):
"""simple docstring"""
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )
        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )
        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
class __A ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowercase ( self ):
"""simple docstring"""
        model = TFDPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base' )
        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]] )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids )[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
0.03_236_253,
0.12_753_335,
0.16_818_509,
0.00_279_786,
0.3_896_933,
0.24_264_945,
0.2_178_971,
-0.02_335_227,
-0.08_481_959,
-0.14_324_117,
]
] )
        self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 716 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
A_ :List[str] = None
A_ :Dict = logging.get_logger(__name__)
A_ :Tuple = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
A_ :Union[str, Any] = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json''',
},
}
A_ :Union[str, Any] = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
A_ :Any = '''▁'''
# Segments (not really needed)
A_ :Tuple = 0
A_ :Union[str, Any] = 1
A_ :Tuple = 2
A_ :str = 3
A_ :int = 4
class __A ( PreTrainedTokenizerFast ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = """left"""
    slow_tokenizer_class = XLNetTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , **kwargs , ):
        """simple docstring"""
        # the mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def __lowercase ( self , token_ids_a , token_ids_b = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_b + sep + cls
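        # e.g. with sep = [sep_id] and cls = [cls_id], token id lists A and B become:
        #   single sequence: A + [sep_id] + [cls_id]
        #   pair:            A + [sep_id] + B + [sep_id] + [cls_id]
        # (XLNet appends its special tokens instead of prepending them as BERT does)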
    def __lowercase ( self , token_ids_a , token_ids_b = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_b is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1] + cls_segment_id
    def __lowercase ( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
| 154 | 0 |
'''simple docstring'''
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
lowercase__ : str = logging.getLogger(__name__)
def _lowerCAmelCase ( out : Tuple , labels : Optional[int] ) -> Optional[int]:
    outputs = np.argmax(out , axis=1 )
    return np.sum(outputs == labels )
def _lowerCAmelCase ( dataset_path : Optional[int] ) -> int:
    with open(dataset_path , encoding='utf_8' ) as f:
        f = csv.reader(f )
        output = []
        next(f )  # skip the first line
        for line in tqdm(f ):
            output.append((' '.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
    return output
def _lowerCAmelCase ( encoded_datasets : Tuple , input_len : List[Any] , cap_length : Dict , start_token : Dict , delimiter_token : List[str] , clf_token : Union[str, Any] ) -> Any:
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset )
        input_ids = np.zeros((n_batch, 2, input_len) , dtype=np.int64 )
        mc_token_ids = np.zeros((n_batch, 2) , dtype=np.int64 )
        lm_labels = np.full((n_batch, 2, input_len) , fill_value=-1_00 , dtype=np.int64 )
        mc_labels = np.zeros((n_batch,) , dtype=np.int64 )
        for i, (story, conta, contb, mc_label) in enumerate(dataset ):
            with_conta = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
            with_contb = [start_token] + story[:cap_length] + [delimiter_token] + contb[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_conta )] = with_conta
            input_ids[i, 1, : len(with_contb )] = with_contb
            mc_token_ids[i, 0] = len(with_conta ) - 1
            mc_token_ids[i, 1] = len(with_contb ) - 1
            lm_labels[i, 0, : len(with_conta )] = with_conta
            lm_labels[i, 1, : len(with_contb )] = with_contb
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t ) for t in all_inputs ) )
    return tensor_datasets
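# Layout sketch for each returned tensor (n = number of examples in a dataset):
#   input_ids    (n, 2, input_len)  "[start] story [delim] cont{a,b} [clf]" per choice
#   mc_token_ids (n, 2)             index of the [clf] token within each choice
#   lm_labels    (n, 2, input_len)  same ids, with padding kept at -100 (ignored by the LM loss)
#   mc_labels    (n,)               index of the correct continuation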
def _lowerCAmelCase ( ) -> Union[str, Any]:
__A : Tuple = argparse.ArgumentParser()
parser.add_argument('--model_name' , type=__snake_case , default='openai-gpt' , help='pretrained model name' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' )
parser.add_argument(
'--output_dir' , default=__snake_case , type=__snake_case , required=__snake_case , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument('--train_dataset' , type=__snake_case , default='' )
parser.add_argument('--eval_dataset' , type=__snake_case , default='' )
parser.add_argument('--seed' , type=__snake_case , default=42 )
parser.add_argument('--num_train_epochs' , type=__snake_case , default=3 )
parser.add_argument('--train_batch_size' , type=__snake_case , default=8 )
parser.add_argument('--eval_batch_size' , type=__snake_case , default=16 )
parser.add_argument('--adam_epsilon' , default=1e-8 , type=__snake_case , help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' , type=__snake_case , default=1 )
parser.add_argument(
'--max_steps' , default=-1 , type=__snake_case , help=(
'If > 0: set total number of training steps to perform. Override num_train_epochs.'
) , )
parser.add_argument(
'--gradient_accumulation_steps' , type=__snake_case , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--learning_rate' , type=__snake_case , default=6.2_5e-5 )
parser.add_argument('--warmup_steps' , default=0 , type=__snake_case , help='Linear warmup over warmup_steps.' )
parser.add_argument('--lr_schedule' , type=__snake_case , default='warmup_linear' )
parser.add_argument('--weight_decay' , type=__snake_case , default=0.01 )
parser.add_argument('--lm_coef' , type=__snake_case , default=0.9 )
parser.add_argument('--n_valid' , type=__snake_case , default=3_74 )
parser.add_argument('--server_ip' , type=__snake_case , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=__snake_case , default='' , help='Can be used for distant debugging.' )
__A : List[Any] = parser.parse_args()
print(__snake_case )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__snake_case )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
__A : str = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
__A : List[Any] = torch.cuda.device_count()
logger.info('device: {}, n_gpu {}'.format(__snake_case , __snake_case ) )
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
__A : Optional[Any] = ['_start_', '_delimiter_', '_classify_']
__A : Tuple = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(__snake_case )
__A : int = tokenizer.convert_tokens_to_ids(__snake_case )
__A : Union[str, Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(__snake_case ) )
model.to(__snake_case )
# Load and encode the datasets
    def tokenize_and_encode(obj : Optional[Any] ):
        if isinstance(obj , str ):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj ) )
        elif isinstance(obj , int ):
            return obj
        return [tokenize_and_encode(o ) for o in obj]
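    # e.g. tokenize_and_encode(("Hello world", 3)) returns [[ids...], 3]: strings are
    # tokenized to ids (the exact values depend on the vocabulary), ints pass through,
    # and nested containers are encoded recursively.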
logger.info('Encoding dataset...' )
__A : Tuple = load_rocstories_dataset(args.train_dataset )
__A : List[Any] = load_rocstories_dataset(args.eval_dataset )
__A : Any = (train_dataset, eval_dataset)
__A : List[Any] = tokenize_and_encode(__snake_case )
# Compute the max input length for the Transformer
__A : Dict = model.config.n_positions // 2 - 2
__A : Any = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
__A : Optional[Any] = min(__snake_case , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
__A : str = pre_process_datasets(__snake_case , __snake_case , __snake_case , *__snake_case )
__A ,__A : Optional[int] = tensor_datasets[0], tensor_datasets[1]
__A : str = TensorDataset(*__snake_case )
__A : Optional[int] = RandomSampler(__snake_case )
__A : Optional[Any] = DataLoader(__snake_case , sampler=__snake_case , batch_size=args.train_batch_size )
__A : Optional[Any] = TensorDataset(*__snake_case )
__A : str = SequentialSampler(__snake_case )
__A : Tuple = DataLoader(__snake_case , sampler=__snake_case , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
__A : Any = args.max_steps
__A : Optional[int] = args.max_steps // (len(__snake_case ) // args.gradient_accumulation_steps) + 1
else:
__A : Any = len(__snake_case ) // args.gradient_accumulation_steps * args.num_train_epochs
__A : Optional[Any] = list(model.named_parameters() )
__A : List[Any] = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
__A : str = [
{
'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'weight_decay': args.weight_decay,
},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
]
__A : Optional[Any] = AdamW(__snake_case , lr=args.learning_rate , eps=args.adam_epsilon )
__A : List[str] = get_linear_schedule_with_warmup(
__snake_case , num_warmup_steps=args.warmup_steps , num_training_steps=__snake_case )
if args.do_train:
__A ,__A ,__A : Tuple = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='Epoch' ):
__A : str = 0
__A : str = 0
__A : Tuple = tqdm(__snake_case , desc='Training' )
for step, batch in enumerate(__snake_case ):
__A : Tuple = tuple(t.to(__snake_case ) for t in batch )
__A ,__A ,__A ,__A : List[str] = batch
__A : str = model(__snake_case , mc_token_ids=__snake_case , lm_labels=__snake_case , mc_labels=__snake_case )
__A : Dict = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
__A : Tuple = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
__A : List[str] = 'Training loss: {:.2e} lr: {:.2e}'.format(__snake_case , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
__A : Dict = model.module if hasattr(__snake_case , 'module' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
__A : int = os.path.join(args.output_dir , __snake_case )
__A : List[str] = os.path.join(args.output_dir , __snake_case )
torch.save(model_to_save.state_dict() , __snake_case )
model_to_save.config.to_json_file(__snake_case )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
__A : Union[str, Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
__A : List[str] = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(__snake_case )
if args.do_eval:
model.eval()
__A ,__A : Dict = 0, 0
__A ,__A : Any = 0, 0
for batch in tqdm(__snake_case , desc='Evaluating' ):
__A : int = tuple(t.to(__snake_case ) for t in batch )
__A ,__A ,__A ,__A : Any = batch
with torch.no_grad():
__A ,__A ,__A ,__A : Any = model(
__snake_case , mc_token_ids=__snake_case , lm_labels=__snake_case , mc_labels=__snake_case )
__A : Optional[Any] = mc_logits.detach().cpu().numpy()
__A : List[str] = mc_labels.to('cpu' ).numpy()
__A : Union[str, Any] = accuracy(__snake_case , __snake_case )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
__A : Optional[Any] = eval_loss / nb_eval_steps
__A : Any = eval_accuracy / nb_eval_examples
__A : Union[str, Any] = tr_loss / nb_tr_steps if args.do_train else None
__A : Any = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
__A : str = os.path.join(args.output_dir , 'eval_results.txt' )
with open(__snake_case , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s' , __snake_case , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
if __name__ == "__main__":
    main()
| 8 |
'''simple docstring'''
def _lowerCAmelCase ( number : int ) -> bool:
    return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print('''Program to check whether a number is a Perfect number or not...''')
lowercase__ : int = int(input('''Enter number: ''').strip())
    print(f"""{number} is {"" if _lowerCAmelCase(number) else "not "}a Perfect Number.""")
| 8 | 1 |
'''simple docstring'''
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
__lowerCAmelCase : List[str] = logging.get_logger(__name__)
__lowerCAmelCase : Union[str, Any] = {
"artists_file": "artists.json",
"lyrics_file": "lyrics.json",
"genres_file": "genres.json",
}
__lowerCAmelCase : Any = {
"artists_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json",
},
"genres_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json",
},
"lyrics_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json",
},
}
__lowerCAmelCase : Dict = {
"jukebox": 512,
}
class A ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self : int , artists_file : str , genres_file : Optional[int] , lyrics_file : Tuple , version : Union[str, Any]=["v3", "v2", "v2"] , max_n_lyric_tokens : List[Any]=5_1_2 , n_genres : Tuple=5 , unk_token : List[Any]="<|endoftext|>" , **kwargs : Optional[Any] , ) -> Any:
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        super().__init__(
            unk_token=unk_token , n_genres=n_genres , version=version , max_n_lyric_tokens=max_n_lyric_tokens , **kwargs , )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres
        with open(artists_file , encoding='''utf-8''' ) as vocab_handle:
            self.artists_encoder = json.load(vocab_handle )
        with open(genres_file , encoding='''utf-8''' ) as vocab_handle:
            self.genres_encoder = json.load(vocab_handle )
        with open(lyrics_file , encoding='''utf-8''' ) as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle )
        oov = r'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'''
        # In v2 the vocabulary had n_vocab=80; v3 dropped the '+' character, leaving n_vocab=79.
        if len(self.lyrics_encoder ) == 7_9:
            oov = oov.replace(r'''\-\'''' , r'''\-+\'''' )
        self.out_of_vocab = regex.compile(oov )
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
@property
def snake_case__ ( self : Optional[int] ) -> List[Any]:
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def snake_case__ ( self : Dict ) -> Optional[Any]:
        return {'''artists_encoder''': self.artists_encoder, '''genres_encoder''': self.genres_encoder, '''lyrics_encoder''': self.lyrics_encoder}
    def snake_case__ ( self : Optional[Any] , list_artists : List[Any] , list_genres : Tuple , list_lyrics : Optional[int] ) -> List[Any]:
        artists_id = [self.artists_encoder.get(artist , 0 ) for artist in list_artists]
        for genres in range(len(list_genres ) ):
            list_genres[genres] = [self.genres_encoder.get(genre , 0 ) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
        lyric_ids = [[self.lyrics_encoder.get(character , 0 ) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids
def snake_case__ ( self : List[str] , __a : List[Any] ) -> Optional[int]:
return list(__a )
    def snake_case__ ( self : Union[str, Any] , artist : Union[str, Any] , genre : Any , lyrics : str , **kwargs : Optional[Any] ) -> Dict:
        artist , genre , lyrics = self.prepare_for_tokenization(artist , genre , lyrics )
        lyrics = self._tokenize(lyrics )
        return artist, genre, lyrics
    def snake_case__ ( self : int , artists : str , genres : str , lyrics : str , is_split_into_words : bool = False ) -> Tuple[str, str, str, Dict[str, Any]]:
        for idx in range(len(self.version ) ):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx] ) + '''.v2'''
                genres[idx] = [
                    self._normalize(genre ) + '''.v2''' for genre in genres[idx].split('''_''' )
                ]  # split is for the full dictionary with combined genres
        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(r'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+''' )
            vocab = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'''
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab ) )}
            self.vocab['''<unk>'''] = 0
            self.n_vocab = len(vocab ) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ''''''
        else:
            self.out_of_vocab = regex.compile(r'''[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+''' )
        lyrics = self._run_strip_accents(lyrics )
        lyrics = lyrics.replace('''\\''' , '''\n''' )
        lyrics = self.out_of_vocab.sub('''''' , lyrics ), [], []
        return artists, genres, lyrics
    def snake_case__ ( self : Optional[int] , text : Tuple ) -> Optional[int]:
        text = unicodedata.normalize('''NFD''' , text )
        output = []
        for char in text:
            cat = unicodedata.category(char )
            if cat == "Mn":
                continue
            output.append(char )
        return "".join(output )
    def snake_case__ ( self : int , text : str ) -> str:
        accepted = (
            [chr(i ) for i in range(ord('''a''' ) , ord('''z''' ) + 1 )]
            + [chr(i ) for i in range(ord('''A''' ) , ord('''Z''' ) + 1 )]
            + [chr(i ) for i in range(ord('''0''' ) , ord('''9''' ) + 1 )]
            + ['''.''']
        )
        accepted = frozenset(accepted )
        pattern = re.compile(r'''_+''' )
        text = ''''''.join([c if c in accepted else '''_''' for c in text.lower()] )
        text = pattern.sub('''_''' , text ).strip('''_''' )
        return text
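    # e.g. self._normalize("Earth, Wind & Fire") lowercases the text, maps each
    # disallowed character to "_", then collapses and strips underscores,
    # producing "earth_wind_fire"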
def snake_case__ ( self : Union[str, Any] , __a : List[str] ) -> str:
return " ".join(__a )
    def snake_case__ ( self : List[str] , inputs : Dict , tensor_type : Optional[Union[str, TensorType]] = None , prepend_batch_axis : bool = False ) -> Union[str, Any]:
        # Convert to TensorType
        if not isinstance(tensor_type , TensorType ):
            tensor_type = TensorType(tensor_type )
        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    '''Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.''' )
            import tensorflow as tf
            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError('''Unable to convert output to PyTorch tensors format, PyTorch is not installed.''' )
            import torch
            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError('''Unable to convert output to JAX tensors format, JAX is not installed.''' )
            import jax.numpy as jnp  # noqa: F811
            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy
        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]
            if not is_tensor(inputs ):
                inputs = as_tensor(inputs )
        except:  # noqa E722
            raise ValueError(
                '''Unable to create tensor, you should probably activate truncation and/or padding '''
                '''with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.''' )
        return inputs
    def __call__( self : Dict , artist : Optional[int] , genres : Union[str, Any] , lyrics : int="" , return_tensors : Dict="pt" ) -> BatchEncoding:
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version )
        genres = [genres] * len(self.version )
        artists_tokens , genres_tokens , lyrics_tokens = self.tokenize(artist , genres , lyrics )
        artists_id , genres_ids , full_tokens = self._convert_token_to_id(artists_tokens , genres_tokens , lyrics_tokens )
        attention_masks = [-INFINITY] * len(full_tokens[-1] )
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=return_tensors )
            for i in range(len(self.version ) )
        ]
        return BatchEncoding({'''input_ids''': input_ids, '''attention_masks''': attention_masks} )
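    # A hypothetical call (values illustrative): tokenizer(artist="Metallica",
    # genres="Rock", lyrics="...") returns one input_ids tensor per model level
    # (version = ["v3", "v2", "v2"]), each laid out as three metadata placeholders
    # followed by [artist_id, *genre_ids, *lyric_token_ids]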
    def snake_case__ ( self : Tuple , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        artists_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''artists_file'''] )
        with open(artists_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.artists_encoder , ensure_ascii=False ) )
        genres_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''genres_file'''] )
        with open(genres_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.genres_encoder , ensure_ascii=False ) )
        lyrics_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''lyrics_file'''] )
        with open(lyrics_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.lyrics_encoder , ensure_ascii=False ) )
        return (artists_file, genres_file, lyrics_file)
    def _convert_id_to_token( self , artists_index , genres_index , lyric_index ):
        artist = self.artists_decoder.get(artists_index )
        genres = [self.genres_decoder.get(genre ) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character ) for character in lyric_index]
        return artist, genres, lyrics
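
# A minimal, standalone sketch of the normalization technique used above: restrict
# text to an accepted character set and collapse runs of underscores. The function
# name and charset below are illustrative, not part of the tokenizer's public API.
import re as _re


def normalize_lyrics_sketch(text: str) -> str:
    accepted = frozenset(
        [chr(i) for i in range(ord("a"), ord("z") + 1)]
        + [chr(i) for i in range(ord("0"), ord("9") + 1)]
        + ["."]
    )
    text = "".join(c if c in accepted else "_" for c in text.lower())
    return _re.sub(r"_+", "_", text).strip("_")


assert normalize_lyrics_sketch("Hello, World!") == "hello_world"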
| 654 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
class A ( UpperCAmelCase ):
a_ = '''bert-generation'''
def __init__( self : str , __a : str=5_0_3_5_8 , __a : int=1_0_2_4 , __a : Optional[Any]=2_4 , __a : Any=1_6 , __a : int=4_0_9_6 , __a : Any="gelu" , __a : Union[str, Any]=0.1 , __a : Any=0.1 , __a : Union[str, Any]=5_1_2 , __a : int=0.0_2 , __a : str=1e-12 , __a : List[str]=0 , __a : Optional[int]=2 , __a : Tuple=1 , __a : str="absolute" , __a : Optional[Any]=True , **__a : Tuple , ) -> Any:
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = hidden_act
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = initializer_range
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = position_embedding_type
__UpperCAmelCase = use_cache
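
# Usage sketch (illustrative, not part of the original file): a PretrainedConfig
# subclass like the one above accepts keyword overrides and round-trips through
# to_dict()/from_dict(). The class is listed above under the obfuscated name `A`.
_cfg = A(hidden_size=256 , num_hidden_layers=4 )
assert _cfg.hidden_size == 256
assert A.from_dict(_cfg.to_dict() ).num_hidden_layers == 4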
| 654 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase: int = logging.get_logger(__name__)
__UpperCamelCase: Any = {
"""facebook/data2vec-vision-base-ft""": (
"""https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"""
),
}
class __lowerCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
_A = "data2vec-vision"
def __init__( self: Dict, lowerCamelCase_: Optional[int]=768, lowerCamelCase_: str=12, lowerCamelCase_: str=12, lowerCamelCase_: Any=3072, lowerCamelCase_: Any="gelu", lowerCamelCase_: Union[str, Any]=0.0, lowerCamelCase_: List[Any]=0.0, lowerCamelCase_: Tuple=0.0_2, lowerCamelCase_: Optional[Any]=1E-12, lowerCamelCase_: Any=224, lowerCamelCase_: List[Any]=16, lowerCamelCase_: Union[str, Any]=3, lowerCamelCase_: str=False, lowerCamelCase_: Optional[Any]=False, lowerCamelCase_: Optional[Any]=False, lowerCamelCase_: Any=False, lowerCamelCase_: Union[str, Any]=0.1, lowerCamelCase_: Tuple=0.1, lowerCamelCase_: str=True, lowerCamelCase_: Optional[Any]=[3, 5, 7, 11], lowerCamelCase_: Optional[int]=[1, 2, 3, 6], lowerCamelCase_: str=True, lowerCamelCase_: Optional[int]=0.4, lowerCamelCase_: Any=256, lowerCamelCase_: Optional[Any]=1, lowerCamelCase_: List[str]=False, lowerCamelCase_: str=255, **lowerCamelCase_: Dict, ):
super().__init__(**lowerCamelCase_ )
lowercase__ : Optional[Any] = hidden_size
lowercase__ : str = num_hidden_layers
lowercase__ : Optional[Any] = num_attention_heads
lowercase__ : List[str] = intermediate_size
lowercase__ : List[str] = hidden_act
lowercase__ : Union[str, Any] = hidden_dropout_prob
lowercase__ : Optional[int] = attention_probs_dropout_prob
lowercase__ : List[Any] = initializer_range
lowercase__ : Any = layer_norm_eps
lowercase__ : Union[str, Any] = image_size
lowercase__ : List[Any] = patch_size
lowercase__ : List[str] = num_channels
lowercase__ : Optional[int] = use_mask_token
lowercase__ : Union[str, Any] = use_absolute_position_embeddings
lowercase__ : Any = use_relative_position_bias
lowercase__ : Any = use_shared_relative_position_bias
lowercase__ : Optional[Any] = layer_scale_init_value
lowercase__ : Dict = drop_path_rate
lowercase__ : Dict = use_mean_pooling
# decode head attributes (semantic segmentation)
lowercase__ : Any = out_indices
lowercase__ : int = pool_scales
# auxiliary head attributes (semantic segmentation)
lowercase__ : int = use_auxiliary_head
lowercase__ : Dict = auxiliary_loss_weight
lowercase__ : List[Any] = auxiliary_channels
lowercase__ : List[Any] = auxiliary_num_convs
lowercase__ : Union[str, Any] = auxiliary_concat_input
lowercase__ : str = semantic_loss_ignore_index
class __lowerCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
_A = version.parse("1.11" )
@property
def snake_case__( self: Dict ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def snake_case__( self: List[str] ):
return 1E-4
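
# Hedged sketch of how an OnnxConfig like the one above is typically consumed at
# export time: `inputs` supplies the input names and dynamic axes, and
# `atol_for_validation` bounds the PyTorch-vs-ONNX output comparison. The helper
# name and the dummy input shape are assumptions for illustration; real exports go
# through `transformers.onnx.export`.
def export_sketch(model , onnx_config , output_path ):
    import torch

    dummy = torch.zeros(1 , 3 , 224 , 224 )  # (batch, num_channels, height, width)
    torch.onnx.export(
        model ,
        dummy ,
        output_path ,
        input_names=list(onnx_config.inputs ) ,
        dynamic_axes=dict(onnx_config.inputs ) ,
    )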
| 266 |
import argparse
import os
import re
__UpperCamelCase: Any = """src/transformers/models/auto"""
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
__UpperCamelCase: Dict = re.compile(r"""[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict""")
# re pattern that matches identifiers in mappings
__UpperCamelCase: List[str] = re.compile(r"""\s*\(\s*\"(\S[^\"]+)\"""")
def sort_auto_mapping(fname : str , overwrite : bool = False ):
    with open(fname , 'r' , encoding='utf-8' ) as f:
        content = f.read()

    lines = content.split('\n' )
    new_lines = []
    line_idx = 0
    while line_idx < len(lines ):
        if _re_intro_mapping.search(lines[line_idx] ) is not None:
            indent = len(re.search(r'^(\s*)\S' , lines[line_idx] ).groups()[0] ) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(' ' * indent + '(' ):
                new_lines.append(lines[line_idx] )
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(' ' * indent + ')' ):
                        line_idx += 1
                    blocks.append('\n'.join(lines[start_idx : line_idx + 1] ) )
                else:
                    blocks.append(lines[line_idx] )
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks , key=lambda x : _re_identifier.search(x ).groups()[0] )
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx] )
            line_idx += 1

    if overwrite:
        with open(fname , 'w' , encoding='utf-8' ) as f:
            f.write('\n'.join(new_lines ) )
    elif "\n".join(new_lines ) != content:
        return True
def sort_all_auto_mappings(overwrite : bool = False ):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE , f ) for f in os.listdir(PATH_TO_AUTO_MODULE ) if f.endswith('.py' )]
    diffs = [sort_auto_mapping(fname , overwrite=overwrite ) for fname in fnames]

    if not overwrite and any(diffs ):
        failures = [f for f, d in zip(fnames , diffs ) if d]
        raise ValueError(
            f"""The following files have auto mappings that need sorting: {", ".join(failures )}. Run `make style` to fix"""
            ' this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
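
# Quick illustration of the sorting rule implemented above: mapping entries are
# ordered by the first quoted identifier that `_re_identifier` extracts from each
# block. Toy lines, not a real auto-mapping.
_demo = [
    '        ("bert", "BertConfig"),',
    '        ("albert", "AlbertConfig"),',
]
_demo_sorted = sorted(_demo, key=lambda s: _re_identifier.search(s).groups()[0])
assert _re_identifier.search(_demo_sorted[0]).groups()[0] == "albert"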
| 266 | 1 |
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(key: tuple[int, ...], ciphertext: list[int]) -> str | None:
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(key, ciphertext)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename: str = "p059_cipher.txt") -> int:
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(F"""{solution() = }""")
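
# Self-contained demo of the XOR technique above: encrypt a toy message with a
# 3-letter key, then recover it with try_key. The key and message are illustrative,
# not from the Project Euler data file.
_key = tuple(ord(c) for c in "god")
_plain = "the quick brown fox"
_cipher = [p ^ k for p, k in zip((ord(c) for c in _plain), cycle(_key))]
assert try_key(_key, _cipher) == _plain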
| 313 |
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_( self ) -> None:
lowerCamelCase_ = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
lowerCamelCase_ = Vector()
def SCREAMING_SNAKE_CASE_( self ) -> None:
lowerCamelCase_ = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(lowercase ) , "(0,0,0,0,0,1)" )
def SCREAMING_SNAKE_CASE_( self ) -> None:
lowerCamelCase_ = Vector([1, 2, 3, 4] )
self.assertEqual(len(lowercase ) , 4 )
def SCREAMING_SNAKE_CASE_( self ) -> None:
lowerCamelCase_ = Vector([1, 2] )
lowerCamelCase_ = Vector([1, 2, 3, 4, 5] )
lowerCamelCase_ = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
lowerCamelCase_ = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.2_3_6 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.4_1_6 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.6_1_6 , 3 )
def SCREAMING_SNAKE_CASE_( self ) -> None:
lowerCamelCase_ = Vector([1, 2, 3] )
lowerCamelCase_ = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def SCREAMING_SNAKE_CASE_( self ) -> None:
lowerCamelCase_ = Vector([1, 2, 3] )
lowerCamelCase_ = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def SCREAMING_SNAKE_CASE_( self ) -> None:
lowerCamelCase_ = Vector([1, 2, 3] )
lowerCamelCase_ = Vector([2, -1, 4] ) # for test of dot product
lowerCamelCase_ = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , "(3.0,6.0,9.0)" )
self.assertEqual((a * b) , 0 )
def SCREAMING_SNAKE_CASE_( self ) -> None:
self.assertEqual(str(zero_vector(10 ) ).count("0" ) , 10 )
def SCREAMING_SNAKE_CASE_( self ) -> None:
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , "(0,1,0)" )
def SCREAMING_SNAKE_CASE_( self ) -> None:
lowerCamelCase_ = Vector([1, 2, 3] )
lowerCamelCase_ = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , lowercase , lowercase ) ) , "(3,4,7)" )
def SCREAMING_SNAKE_CASE_( self ) -> None:
lowerCamelCase_ = Vector([1, 0, 0, 0, 0, 0] )
lowerCamelCase_ = x.copy()
self.assertEqual(str(lowercase ) , str(lowercase ) )
def SCREAMING_SNAKE_CASE_( self ) -> None:
lowerCamelCase_ = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(lowercase ) , "(0,1,0)" )
def SCREAMING_SNAKE_CASE_( self ) -> None:
lowerCamelCase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n" , str(lowercase ) )
def SCREAMING_SNAKE_CASE_( self ) -> None:
lowerCamelCase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCamelCase_ = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(lowercase , lowercase ) )
def SCREAMING_SNAKE_CASE_( self ) -> None:
lowerCamelCase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCamelCase_ = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(lowercase , lowercase ) )
def SCREAMING_SNAKE_CASE_( self ) -> None:
lowerCamelCase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def SCREAMING_SNAKE_CASE_( self ) -> None:
lowerCamelCase_ = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
lowerCamelCase_ = Vector([1, 2, 3] )
self.assertEqual("(14,32,50)" , str(a * x ) )
self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n" , str(a * 2 ) )
def SCREAMING_SNAKE_CASE_( self ) -> None:
lowerCamelCase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n" , str(lowercase ) )
def SCREAMING_SNAKE_CASE_( self ) -> None:
lowerCamelCase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.0_1 )
def SCREAMING_SNAKE_CASE_( self ) -> None:
lowerCamelCase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCamelCase_ = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n" , str(a + b ) )
def SCREAMING_SNAKE_CASE_( self ) -> None:
lowerCamelCase_ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCamelCase_ = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n" , str(a - b ) )
def SCREAMING_SNAKE_CASE_( self ) -> None:
self.assertEqual(
"|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
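
# For reference (hedged, following the standard definitions rather than the .lib
# internals): Euclidean length is sqrt(sum(c_i**2)) and the dot product is
# sum(x_i * y_i), which is what the length and `a * b == 0` assertions above check.
import math

assert math.isclose(math.sqrt(sum(c * c for c in [1, 2, 3, 4, 5])), 7.416, abs_tol=1e-3)
assert sum(p * q for p, q in zip([2, -1, 4], [1, -2, -1])) == 0  # the orthogonal case above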
| 313 | 1 |
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
snake_case__ = dict(zip(_a , range(len(_a ) ) ) )
snake_case__ = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
snake_case__ = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 1_60_00,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
snake_case__ = tempfile.mkdtemp()
snake_case__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case__ = os.path.join(self.tmpdirname , _a )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
# load decoder from hub
snake_case__ = '''hf-internal-testing/ngram-beam-search-decoder'''
def SCREAMING_SNAKE_CASE__ ( self:str , **_a:List[Any] ):
snake_case__ = self.add_kwargs_tokens_map.copy()
kwargs.update(_a )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Dict , **_a:List[str] ):
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , **_a:int ):
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = self.get_tokenizer()
snake_case__ = self.get_feature_extractor()
snake_case__ = self.get_decoder()
snake_case__ = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a )
processor.save_pretrained(self.tmpdirname )
snake_case__ = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _a )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _a )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , _a )
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
snake_case__ = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(_a , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=_a , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
snake_case__ = self.get_feature_extractor()
snake_case__ = self.get_tokenizer()
snake_case__ = self.get_decoder()
snake_case__ = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a )
snake_case__ = floats_list((3, 10_00) )
snake_case__ = feature_extractor(_a , return_tensors='''np''' )
snake_case__ = processor(_a , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = self.get_feature_extractor()
snake_case__ = self.get_tokenizer()
snake_case__ = self.get_decoder()
snake_case__ = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a )
snake_case__ = '''This is a test string'''
snake_case__ = processor(text=_a )
snake_case__ = tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:int=(2, 10, 16) , _a:Dict=77 ):
np.random.seed(_a )
return np.random.rand(*_a )
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = self.get_feature_extractor()
snake_case__ = self.get_tokenizer()
snake_case__ = self.get_decoder()
snake_case__ = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a )
snake_case__ = self._get_dummy_logits(shape=(10, 16) , seed=13 )
snake_case__ = processor.decode(_a )
snake_case__ = decoder.decode_beams(_a )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def SCREAMING_SNAKE_CASE__ ( self:int , _a:Union[str, Any] ):
snake_case__ = self.get_feature_extractor()
snake_case__ = self.get_tokenizer()
snake_case__ = self.get_decoder()
snake_case__ = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a )
snake_case__ = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
snake_case__ = processor.batch_decode(_a )
else:
with get_context(_a ).Pool() as pool:
snake_case__ = processor.batch_decode(_a , _a )
snake_case__ = list(_a )
with get_context('''fork''' ).Pool() as p:
snake_case__ = decoder.decode_beams_batch(_a , _a )
            texts_decoder , logit_scores_decoder , lm_scores_decoder = [], [], []
            for beams in decoded_beams:
                texts_decoder.append(beams[0][0] )
                logit_scores_decoder.append(beams[0][-2] )
                lm_scores_decoder.append(beams[0][-1] )
            self.assertListEqual(texts_decoder , decoded_processor.text )
            self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
            self.assertListEqual(logit_scores_decoder , decoded_processor.logit_score )
            self.assertListEqual(lm_scores_decoder , decoded_processor.lm_score )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.get_feature_extractor()
snake_case__ = self.get_tokenizer()
snake_case__ = self.get_decoder()
snake_case__ = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a )
snake_case__ = self._get_dummy_logits()
snake_case__ = 15
snake_case__ = -20.0
snake_case__ = -4.0
snake_case__ = processor.batch_decode(
_a , beam_width=_a , beam_prune_logp=_a , token_min_logp=_a , )
snake_case__ = decoded_processor_out.text
snake_case__ = list(_a )
with get_context('''fork''' ).Pool() as pool:
snake_case__ = decoder.decode_beams_batch(
_a , _a , beam_width=_a , beam_prune_logp=_a , token_min_logp=_a , )
snake_case__ = [d[0][0] for d in decoded_decoder_out]
snake_case__ = [d[0][2] for d in decoded_decoder_out]
snake_case__ = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_a , _a )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , _a )
self.assertTrue(np.array_equal(_a , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , _a , atol=1e-3 ) )
self.assertTrue(np.array_equal(_a , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , _a , atol=1e-3 ) )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = self.get_feature_extractor()
snake_case__ = self.get_tokenizer()
snake_case__ = self.get_decoder()
snake_case__ = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a )
snake_case__ = self._get_dummy_logits()
snake_case__ = 2.0
snake_case__ = 5.0
snake_case__ = -20.0
snake_case__ = True
snake_case__ = processor.batch_decode(
_a , alpha=_a , beta=_a , unk_score_offset=_a , lm_score_boundary=_a , )
snake_case__ = decoded_processor_out.text
snake_case__ = list(_a )
decoder.reset_params(
alpha=_a , beta=_a , unk_score_offset=_a , lm_score_boundary=_a , )
with get_context('''fork''' ).Pool() as pool:
snake_case__ = decoder.decode_beams_batch(
_a , _a , )
snake_case__ = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_a , _a )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , _a )
snake_case__ = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , _a )
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
snake_case__ = processor.decoder.model_container[processor.decoder._model_key]
snake_case__ = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
snake_case__ = os.listdir(_a )
snake_case__ = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ = snapshot_download('''hf-internal-testing/processor_with_lm''' )
snake_case__ = WavaVecaProcessorWithLM.from_pretrained(_a )
snake_case__ = processor.decoder.model_container[processor.decoder._model_key]
snake_case__ = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
snake_case__ = os.listdir(_a )
snake_case__ = os.listdir(_a )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
snake_case__ = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
snake_case__ = floats_list((3, 10_00) )
snake_case__ = processor_wavaveca(_a , return_tensors='''np''' )
snake_case__ = processor_auto(_a , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
snake_case__ = self._get_dummy_logits()
snake_case__ = processor_wavaveca.batch_decode(_a )
snake_case__ = processor_auto.batch_decode(_a )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
snake_case__ = self.get_feature_extractor()
snake_case__ = self.get_tokenizer()
snake_case__ = self.get_decoder()
snake_case__ = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
    def get_from_offsets( offsets , key ):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
snake_case__ = self._get_dummy_logits()[0]
snake_case__ = processor.decode(_a , output_word_offsets=_a )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_a , _a ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
snake_case__ = self._get_dummy_logits()
snake_case__ = processor.batch_decode(_a , output_word_offsets=_a )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_a , _a ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(_a , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
    def test_word_time_stamp_integration( self ):
        import torch

        ds = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=True )
        ds = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_60_00 ) )
        ds_iter = iter(ds )
        sample = next(ds_iter )

        processor = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
        model = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )

        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values

        with torch.no_grad():
            logits = model(input_values ).logits.cpu().numpy()

        output = processor.decode(logits[0] , output_word_offsets=True )

        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_time_stamps = [
            {
                '''start_time''': d['''start_offset'''] * time_offset,
                '''end_time''': d['''end_offset'''] * time_offset,
                '''word''': d['''word'''],
            }
            for d in output['''word_offsets''']
        ]

        EXPECTED_TEXT = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''

        # output words
        self.assertEqual(''' '''.join(self.get_from_offsets(word_time_stamps , '''word''' ) ) , EXPECTED_TEXT )
        self.assertEqual(''' '''.join(self.get_from_offsets(word_time_stamps , '''word''' ) ) , output.text )

        # output times
        start_times = torch.tensor(self.get_from_offsets(word_time_stamps , '''start_time''' ) )
        end_times = torch.tensor(self.get_from_offsets(word_time_stamps , '''end_time''' ) )

        # fmt: off
        expected_start_times = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
        expected_end_times = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
        # fmt: on

        self.assertTrue(torch.allclose(start_times , expected_start_times , atol=0.01 ) )
        self.assertTrue(torch.allclose(end_times , expected_end_times , atol=0.01 ) )
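
# The fork-pool pattern exercised in the batch-decode tests above, in isolation
# (a hedged sketch): state created *before* the pool is instantiated is inherited
# by forked workers, which is why the processor/LM must exist before Pool() is
# created. Names below are illustrative; the "fork" start method is POSIX-only.
from multiprocessing import get_context as _get_context

_SHARED = {"offset": 10}  # stands in for the loaded LM; must exist before the pool


def _shift(x):
    return x + _SHARED["offset"]


if __name__ == "__main__":
    with _get_context("fork").Pool(2) as _pool:
        assert _pool.map(_shift, [1, 2, 3]) == [11, 12, 13]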
| 33 |
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
lowerCamelCase__ = "CompVis/stable-diffusion-v1-1"
lowerCamelCase__ = "CompVis/stable-diffusion-v1-2"
lowerCamelCase__ = "CompVis/stable-diffusion-v1-3"
lowerCamelCase__ = "CompVis/stable-diffusion-v1-4"
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : AutoencoderKL , __a : CLIPTextModel , __a : CLIPTokenizer , __a : UNetaDConditionModel , __a : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __a : StableDiffusionSafetyChecker , __a : CLIPImageProcessor , __a : bool = True , ) -> List[str]:
        super().__init__()
_UpperCamelCase : Optional[int] = StableDiffusionPipeline.from_pretrained(__a )
_UpperCamelCase : int = StableDiffusionPipeline.from_pretrained(__a )
_UpperCamelCase : List[Any] = StableDiffusionPipeline.from_pretrained(__a )
_UpperCamelCase : int = StableDiffusionPipeline(
vae=__a , text_encoder=__a , tokenizer=__a , unet=__a , scheduler=__a , safety_checker=__a , feature_extractor=__a , requires_safety_checker=__a , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict[str, Any]:
        return {k: getattr(self , k ) for k in self.config.keys() if not k.startswith("_" )}
def __SCREAMING_SNAKE_CASE ( self : Any , __a : Optional[Union[str, int]] = "auto" ) -> Optional[Any]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_UpperCamelCase : Tuple = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Any:
self.enable_attention_slicing(__a )
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : Union[str, List[str]] , __a : int = 512 , __a : int = 512 , __a : int = 50 , __a : float = 7.5 , __a : Optional[Union[str, List[str]]] = None , __a : Optional[int] = 1 , __a : float = 0.0 , __a : Optional[torch.Generator] = None , __a : Optional[torch.FloatTensor] = None , __a : Optional[str] = "pil" , __a : bool = True , __a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __a : int = 1 , **__a : Any , ) -> Optional[int]:
return self.pipea(
prompt=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , **__a , )
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : Union[str, List[str]] , __a : int = 512 , __a : int = 512 , __a : int = 50 , __a : float = 7.5 , __a : Optional[Union[str, List[str]]] = None , __a : Optional[int] = 1 , __a : float = 0.0 , __a : Optional[torch.Generator] = None , __a : Optional[torch.FloatTensor] = None , __a : Optional[str] = "pil" , __a : bool = True , __a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __a : int = 1 , **__a : Tuple , ) -> str:
return self.pipea(
prompt=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , **__a , )
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : Union[str, List[str]] , __a : int = 512 , __a : int = 512 , __a : int = 50 , __a : float = 7.5 , __a : Optional[Union[str, List[str]]] = None , __a : Optional[int] = 1 , __a : float = 0.0 , __a : Optional[torch.Generator] = None , __a : Optional[torch.FloatTensor] = None , __a : Optional[str] = "pil" , __a : bool = True , __a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __a : int = 1 , **__a : Tuple , ) -> Any:
return self.pipea(
prompt=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , **__a , )
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Union[str, List[str]] , __a : int = 512 , __a : int = 512 , __a : int = 50 , __a : float = 7.5 , __a : Optional[Union[str, List[str]]] = None , __a : Optional[int] = 1 , __a : float = 0.0 , __a : Optional[torch.Generator] = None , __a : Optional[torch.FloatTensor] = None , __a : Optional[str] = "pil" , __a : bool = True , __a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __a : int = 1 , **__a : List[Any] , ) -> Union[str, Any]:
return self.pipea(
prompt=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , **__a , )
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( self : str , __a : Union[str, List[str]] , __a : int = 512 , __a : int = 512 , __a : int = 50 , __a : float = 7.5 , __a : Optional[Union[str, List[str]]] = None , __a : Optional[int] = 1 , __a : float = 0.0 , __a : Optional[torch.Generator] = None , __a : Optional[torch.FloatTensor] = None , __a : Optional[str] = "pil" , __a : bool = True , __a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __a : int = 1 , **__a : int , ) -> List[Any]:
_UpperCamelCase : Tuple = "cuda" if torch.cuda.is_available() else "cpu"
self.to(__a )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` must be divisible by 8 but are {height} and {width}.''' )
# Get first result from Stable Diffusion Checkpoint v1.1
_UpperCamelCase : List[str] = self.textaimg_sda_a(
prompt=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , **__a , )
# Get first result from Stable Diffusion Checkpoint v1.2
_UpperCamelCase : Optional[Any] = self.textaimg_sda_a(
prompt=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , **__a , )
# Get first result from Stable Diffusion Checkpoint v1.3
_UpperCamelCase : str = self.textaimg_sda_a(
prompt=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , **__a , )
# Get first result from Stable Diffusion Checkpoint v1.4
_UpperCamelCase : str = self.textaimg_sda_a(
prompt=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , **__a , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
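
# Hedged usage sketch for the comparison pipeline above. The checkpoint id matches
# the constants at the top of the file; the custom_pipeline name follows the
# diffusers community-pipeline convention and is an assumption that may differ in
# your install. Running this downloads all four v1.x checkpoints.
if __name__ == "__main__":
    from diffusers import DiffusionPipeline

    pipe = DiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
    )
    output = pipe(prompt="an astronaut riding a horse", num_inference_steps=25)
    print(len(output.images))  # one image per v1.x checkpoint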
| 624 | 0 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape , scale=1.0 , rng=None , name=None ):
    """Creates a random float32 tensor of the given shape, as nested Python lists."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )

    return values
@require_torch
@require_torchaudio
class snake_case_ ( unittest.TestCase ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=7 , __lowerCAmelCase=400 , __lowerCAmelCase=2_000 , __lowerCAmelCase=10 , __lowerCAmelCase=160 , __lowerCAmelCase=8 , __lowerCAmelCase=0.0 , __lowerCAmelCase=4_000 , __lowerCAmelCase=False , __lowerCAmelCase=True , ):
SCREAMING_SNAKE_CASE_ : List[Any] = parent
SCREAMING_SNAKE_CASE_ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = min_seq_length
SCREAMING_SNAKE_CASE_ : Optional[Any] = max_seq_length
SCREAMING_SNAKE_CASE_ : Dict = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE_ : Optional[Any] = padding_value
SCREAMING_SNAKE_CASE_ : Union[str, Any] = sampling_rate
SCREAMING_SNAKE_CASE_ : Any = return_attention_mask
SCREAMING_SNAKE_CASE_ : Optional[Any] = do_normalize
SCREAMING_SNAKE_CASE_ : Union[str, Any] = feature_size
SCREAMING_SNAKE_CASE_ : int = chunk_length
SCREAMING_SNAKE_CASE_ : int = hop_length
def __A ( self ):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __A ( self , __lowerCAmelCase=False , __lowerCAmelCase=False ):
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
if equal_length:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE_ : Tuple = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE_ : str = [np.asarray(lowercase_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class snake_case_ ( snake_case__ , unittest.TestCase ):
__lowerCamelCase : Union[str, Any] = WhisperFeatureExtractor if is_speech_available() else None
def __A ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = WhisperFeatureExtractionTester(self )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : int = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ : Optional[int] = feat_extract_first.save_pretrained(lowercase_ )[0]
check_json_file_has_correct_format(lowercase_ )
SCREAMING_SNAKE_CASE_ : List[str] = self.feature_extraction_class.from_pretrained(lowercase_ )
SCREAMING_SNAKE_CASE_ : Dict = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE_ : Tuple = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE_ : List[str] = feat_extract_first.mel_filters
SCREAMING_SNAKE_CASE_ : int = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowercase_ , lowercase_ ) )
self.assertEqual(lowercase_ , lowercase_ )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ : str = os.path.join(lowercase_ , 'feat_extract.json' )
feat_extract_first.to_json_file(lowercase_ )
SCREAMING_SNAKE_CASE_ : Any = self.feature_extraction_class.from_json_file(lowercase_ )
SCREAMING_SNAKE_CASE_ : int = feat_extract_first.to_dict()
SCREAMING_SNAKE_CASE_ : Optional[Any] = feat_extract_second.to_dict()
SCREAMING_SNAKE_CASE_ : Dict = feat_extract_first.mel_filters
SCREAMING_SNAKE_CASE_ : int = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowercase_ , lowercase_ ) )
self.assertEqual(lowercase_ , lowercase_ )
def __A ( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
SCREAMING_SNAKE_CASE_ : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE_ : Any = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE_ : List[str] = [np.asarray(lowercase_ ) for speech_input in speech_inputs]
# Test feature size
SCREAMING_SNAKE_CASE_ : List[Any] = feature_extractor(lowercase_ , padding='max_length' , return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
SCREAMING_SNAKE_CASE_ : List[Any] = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
SCREAMING_SNAKE_CASE_ : List[Any] = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-3 ) )
# Test batched
SCREAMING_SNAKE_CASE_ : Tuple = feature_extractor(lowercase_ , return_tensors='np' ).input_features
SCREAMING_SNAKE_CASE_ : List[Any] = feature_extractor(lowercase_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(lowercase_ , lowercase_ ):
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE_ : Dict = [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE_ : Optional[int] = np.asarray(lowercase_ )
SCREAMING_SNAKE_CASE_ : Dict = feature_extractor(lowercase_ , return_tensors='np' ).input_features
SCREAMING_SNAKE_CASE_ : Any = feature_extractor(lowercase_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(lowercase_ , lowercase_ ):
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-3 ) )
# Test truncation required
SCREAMING_SNAKE_CASE_ : Tuple = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
SCREAMING_SNAKE_CASE_ : List[str] = [np.asarray(lowercase_ ) for speech_input in speech_inputs]
SCREAMING_SNAKE_CASE_ : Dict = [x[: feature_extractor.n_samples] for x in speech_inputs]
SCREAMING_SNAKE_CASE_ : List[Any] = [np.asarray(lowercase_ ) for speech_input in speech_inputs_truncated]
SCREAMING_SNAKE_CASE_ : str = feature_extractor(lowercase_ , return_tensors='np' ).input_features
SCREAMING_SNAKE_CASE_ : List[str] = feature_extractor(lowercase_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(lowercase_ , lowercase_ ):
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-3 ) )
def __A ( self ):
import torch
SCREAMING_SNAKE_CASE_ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.random.rand(100 , 32 ).astype(np.floataa )
SCREAMING_SNAKE_CASE_ : int = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
SCREAMING_SNAKE_CASE_ : Tuple = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
SCREAMING_SNAKE_CASE_ : int = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
    def _load_datasamples( self , num_samples ):
        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('id' ).select(range(num_samples ) )[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
def __A ( self ):
# fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
[
0.11_93, -0.09_46, -0.10_98, -0.01_96, 0.02_25, -0.06_90, -0.17_36, 0.09_51,
0.09_71, -0.08_17, -0.07_02, 0.01_62, 0.02_60, 0.00_17, -0.01_92, -0.16_78,
0.07_09, -0.18_67, -0.06_55, -0.02_74, -0.02_34, -0.18_84, -0.05_16, -0.05_54,
-0.02_74, -0.14_25, -0.14_23, 0.08_37, 0.03_77, -0.08_54
] )
# fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech , return_tensors='pt' ).input_features
        self.assertEqual(input_features.shape , (1, 80, 3_000) )
        self.assertTrue(torch.allclose(input_features[0, 0, :30] , EXPECTED_INPUT_FEATURES , atol=1e-4 ) )
def __A ( self ):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        audio = self._load_datasamples(1 )[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535 # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=None )[0]
        self.assertTrue(np.all(np.mean(audio ) < 1e-3 ) )
        self.assertTrue(np.all(np.abs(np.var(audio ) - 1 ) < 1e-3 ) )
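
# The normalization checked above, in two lines of NumPy (a sketch of the standard
# per-utterance zero-mean/unit-variance transform; the library version additionally
# handles attention masks, while the epsilon here is an illustrative stabilizer).
import numpy as _np

_audio = _np.random.RandomState(0).rand(16_000) * 65_535
_normed = (_audio - _audio.mean()) / _np.sqrt(_audio.var() + 1e-7)
assert abs(_normed.mean()) < 1e-3 and abs(_normed.var() - 1) < 1e-3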
| 704 |
from jiwer import compute_measures
import datasets
lowerCAmelCase__: Union[str, Any] = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
lowerCAmelCase__: str = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
lowerCAmelCase__: int = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
def __A ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/jitsi/jiwer/'] , reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
] , )
def __A ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=False ):
if concatenate_texts:
return compute_measures(__lowerCAmelCase , __lowerCAmelCase )["wer"]
else:
SCREAMING_SNAKE_CASE_ : List[Any] = 0
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
for prediction, reference in zip(__lowerCAmelCase , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Tuple = compute_measures(__lowerCAmelCase , __lowerCAmelCase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
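
# Worked sketch of the WER formula from the docstring, WER = (S + D + I) / N,
# computed with a word-level edit distance in pure Python (no jiwer); the helper
# name is illustrative only.
def _wer_sketch(reference: str, hypothesis: str) -> float:
    ref, hyp = reference.split(), hypothesis.split()
    # dp[i][j] = edits needed to turn ref[:i] into hyp[:j]
    dp = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        dp[i][0] = i  # i deletions
    for j in range(len(hyp) + 1):
        dp[0][j] = j  # j insertions
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            sub = 0 if ref[i - 1] == hyp[j - 1] else 1
            dp[i][j] = min(dp[i - 1][j] + 1, dp[i][j - 1] + 1, dp[i - 1][j - 1] + sub)
    return dp[-1][-1] / len(ref)


assert _wer_sketch("this is the reference", "this is the prediction") == 0.25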
| 311 | 0 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class __magic_name__ :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case_ , initializer_range=self.initializer_range , use_stable_embedding=snake_case_ , )
def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
lowercase =OpenLlamaModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
lowercase =model(snake_case_ , attention_mask=snake_case_ )
lowercase =model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
lowercase =True
lowercase =OpenLlamaModel(snake_case_ )
model.to(snake_case_ )
model.eval()
lowercase =model(
snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , )
lowercase =model(
snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , )
lowercase =model(snake_case_ , attention_mask=snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
lowercase =OpenLlamaForCausalLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
lowercase =model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
lowercase =True
lowercase =True
lowercase =OpenLlamaForCausalLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
# first forward pass
lowercase =model(
snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , use_cache=snake_case_ , )
lowercase =outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowercase =ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase =ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowercase =torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase =torch.cat([input_mask, next_mask] , dim=-1 )
lowercase =model(
snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , output_hidden_states=snake_case_ , )['''hidden_states'''][0]
lowercase =model(
snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , past_key_values=snake_case_ , output_hidden_states=snake_case_ , )['''hidden_states'''][0]
# select random slice
lowercase =ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase =output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase =output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case_ , snake_case_ , atol=1E-3 ) )
def _A( self ):
lowercase =self.prepare_config_and_inputs()
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) =config_and_inputs
lowercase ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __magic_name__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
UpperCamelCase__ = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
UpperCamelCase__ = (OpenLlamaForCausalLM,) if is_torch_available() else ()
UpperCamelCase__ = (
{
'feature-extraction': OpenLlamaModel,
'text-classification': OpenLlamaForSequenceClassification,
'text-generation': OpenLlamaForCausalLM,
'zero-shot': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
def _A( self ):
lowercase =OpenLlamaModelTester(self )
lowercase =ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def _A( self ):
self.config_tester.run_common_tests()
def _A( self ):
lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def _A( self ):
lowercase =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase =type
self.model_tester.create_and_check_model(*snake_case_ )
def _A( self ):
lowercase , lowercase =self.model_tester.prepare_config_and_inputs_for_common()
lowercase =3
lowercase =input_dict['''input_ids''']
lowercase =input_ids.ne(1 ).to(snake_case_ )
lowercase =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase =OpenLlamaForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
lowercase =model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _A( self ):
lowercase , lowercase =self.model_tester.prepare_config_and_inputs_for_common()
lowercase =3
lowercase ='''single_label_classification'''
lowercase =input_dict['''input_ids''']
lowercase =input_ids.ne(1 ).to(snake_case_ )
lowercase =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase =OpenLlamaForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
lowercase =model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _A( self ):
lowercase , lowercase =self.model_tester.prepare_config_and_inputs_for_common()
lowercase =3
lowercase ='''multi_label_classification'''
lowercase =input_dict['''input_ids''']
lowercase =input_ids.ne(1 ).to(snake_case_ )
lowercase =ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowercase =OpenLlamaForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
lowercase =model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''' )
def _A( self ):
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def _A( self , snake_case_ ):
lowercase , lowercase =self.model_tester.prepare_config_and_inputs_for_common()
lowercase =ids_tensor([1, 10] , config.vocab_size )
lowercase =ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase =OpenLlamaModel(snake_case_ )
original_model.to(snake_case_ )
original_model.eval()
lowercase =original_model(snake_case_ ).last_hidden_state
lowercase =original_model(snake_case_ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase ={'''type''': scaling_type, '''factor''': 10.0}
lowercase =OpenLlamaModel(snake_case_ )
scaled_model.to(snake_case_ )
scaled_model.eval()
lowercase =scaled_model(snake_case_ ).last_hidden_state
lowercase =scaled_model(snake_case_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case_ , snake_case_ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(snake_case_ , snake_case_ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case_ , snake_case_ , atol=1E-5 ) )
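

# Hedged sketch (not part of the original tests): the cached-vs-uncached
# equivalence that create_and_check_decoder_model_past_large_inputs verifies,
# reproduced with GPT-2 so it runs without OpenLlama weights. The model name
# and tolerance are illustrative assumptions.
def _kv_cache_equivalence_sketch():
    import torch
    from transformers import GPT2LMHeadModel, GPT2TokenizerFast

    model = GPT2LMHeadModel.from_pretrained("gpt2").eval()
    tok = GPT2TokenizerFast.from_pretrained("gpt2")
    ids = tok("hello world", return_tensors="pt").input_ids
    with torch.no_grad():
        # run all but the last token, keep the KV cache, then feed only the last token
        past = model(ids[:, :-1], use_cache=True).past_key_values
        cached = model(ids[:, -1:], past_key_values=past).logits
        # reference: one uncached pass over the full sequence
        full = model(ids).logits[:, -1:, :]
    assert torch.allclose(cached, full, atol=1e-4)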
| 72 |
"""Project Euler problem 25: find the index of the first Fibonacci number with n digits."""


def fibonacci(n: int) -> int:
    """Return the n-th term of the sequence (1-indexed; fibonacci(1) == 0)."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])

        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with at least n digits."""
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    """Index of the first Fibonacci term to contain n digits."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
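

# Hedged alternative sketch (not in the original): the same index can be read
# off Binet's formula, log10(F_k) ~ k*log10(phi) - log10(5)/2, with no big-int loop.
import math


def fibonacci_digits_index_closed_form(n: int) -> int:
    phi = (1 + math.sqrt(5)) / 2
    return math.ceil((n - 1 + math.log10(5) / 2) / math.log10(phi))


assert fibonacci_digits_index_closed_form(3) == 12  # F(12) = 144 is the first 3-digit term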
| 72 | 1 |
'''simple docstring'''
from __future__ import annotations
def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    """Solve the mass-action law n * p = n_i**2 for whichever of the three
    concentrations is passed as 0, returning (name, value)."""
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("""Exactly one of the three concentrations must be 0 (the unknown one)""")
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative in a semiconductor""" )
elif hole_conc < 0:
raise ValueError("""Hole concentration cannot be negative in a semiconductor""" )
elif intrinsic_conc < 0:
raise ValueError(
"""Intrinsic concentration cannot be negative in a semiconductor""" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
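

# Hedged usage example (illustrative values): solving for the intrinsic
# concentration; by mass action n_i = sqrt(n * p) = sqrt(25 * 100) = 50.
assert carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0) == ("intrinsic_conc", 50.0)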
| 419 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : List[str] = tempfile.mkdtemp()
snake_case__ : Tuple = BlipImageProcessor()
snake_case__ : Dict = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
snake_case__ : Dict = BlipaProcessor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
return AutoProcessor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE ).tokenizer
def __UpperCamelCase ( self , **__SCREAMING_SNAKE_CASE ):
return AutoProcessor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE ).image_processor
def __UpperCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self ):
snake_case__ : int = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
snake_case__ : Union[str, Any] = [Image.fromarray(np.moveaxis(__SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case__ : Optional[Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
snake_case__ : Optional[int] = self.get_image_processor(do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0 )
snake_case__ : Any = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.get_image_processor()
snake_case__ : Optional[int] = self.get_tokenizer()
snake_case__ : Optional[Any] = BlipaProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
snake_case__ : str = self.prepare_image_inputs()
snake_case__ : Optional[int] = image_processor(__SCREAMING_SNAKE_CASE , return_tensors="""np""" )
snake_case__ : int = processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = self.get_image_processor()
snake_case__ : int = self.get_tokenizer()
snake_case__ : List[Any] = BlipaProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = """lower newer"""
snake_case__ : List[Any] = processor(text=__SCREAMING_SNAKE_CASE )
snake_case__ : int = tokenizer(__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self ):
snake_case__ : str = self.get_image_processor()
snake_case__ : int = self.get_tokenizer()
snake_case__ : List[str] = BlipaProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = """lower newer"""
snake_case__ : Optional[int] = self.prepare_image_inputs()
snake_case__ : Tuple = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(__SCREAMING_SNAKE_CASE ):
processor()
def __UpperCamelCase ( self ):
snake_case__ : Union[str, Any] = self.get_image_processor()
snake_case__ : Optional[Any] = self.get_tokenizer()
snake_case__ : int = BlipaProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case__ : List[Any] = processor.batch_decode(__SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( self ):
snake_case__ : List[Any] = self.get_image_processor()
snake_case__ : List[Any] = self.get_tokenizer()
snake_case__ : List[Any] = BlipaProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = """lower newer"""
snake_case__ : List[Any] = self.prepare_image_inputs()
snake_case__ : Optional[Any] = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
| 419 | 1 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 533 |
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
UpperCAmelCase__ : Union[str, Any] = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever(RagRetriever):
    """RAG retriever for distributed training: the index lives on the main worker,
    queries are gathered there over a dedicated gloo group and results scattered back."""

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
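

# Hedged sketch (not from the original file): the per-worker chunking the
# scatter logic above relies on; `_chunk_tensor` is assumed to behave like
# torch.split along dim 0.
def _chunk_tensor_sketch(t, chunk_size):
    return list(torch.split(t, chunk_size, dim=0))


_parts = _chunk_tensor_sketch(torch.arange(12).reshape(6, 2), 3)
assert len(_parts) == 2 and _parts[0].shape == (3, 2)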
| 313 | 0 |
"""Convert DeiT distilled checkpoints from the timm library."""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "deit.embeddings.cls_token"),
            ("dist_token", "deit.embeddings.distillation_token"),
            ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "deit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("norm.weight", "deit.layernorm.weight"),
                ("norm.bias", "deit.layernorm.bias"),
                ("head.weight", "cls_classifier.weight"),
                ("head.bias", "cls_classifier.bias"),
                ("head_dist.weight", "distillation_classifier.weight"),
                ("head_dist.bias", "distillation_classifier.bias"),
            ]
        )

    return rename_keys


# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak the timm DeiT weights to our DeiT structure."""
    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
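

# Hedged toy illustration (not from the original script) of the fused-qkv split
# performed in read_in_q_k_v above; the hidden size is made up.
_hidden = 8
_qkv = torch.randn(3 * _hidden, _hidden)
_q, _k, _v = _qkv[:_hidden], _qkv[_hidden : 2 * _hidden], _qkv[-_hidden:]
assert _q.shape == _k.shape == _v.shape == (_hidden, _hidden)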
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--deit_name",
        default="vit_deit_base_distilled_patch16_224",
        type=str,
        help="Name of the DeiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 715 |
"""Convert mLUKE checkpoint."""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            new_state_dict["luke." + key] = state_dict[key]
        else:
            new_state_dict[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id

    return new_mapping
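

# Hedged toy version (not from the original script) of the row-append pattern
# used above for [MASK2] and the <ent>/<ent2> tokens; sizes are made up.
_emb = torch.randn(10, 4)
_emb = torch.cat([_emb, _emb[3].unsqueeze(0)])  # duplicate row 3 as a new last row
assert _emb.shape == (11, 4) and torch.equal(_emb[-1], _emb[3])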
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
| 667 | 0 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether cp is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True

    return False


def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids


def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
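

# Hedged spot-checks of the CJK range test above: U+795E ("神") falls inside the
# main CJK Unified Ideographs block, plain ASCII does not.
assert is_chinese("神") == 1
assert is_chinese("abc") == 0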
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
    args = parser.parse_args()
main(args)
| 183 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    """simple docstring"""

    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            # mask out pad tokens so they are ignored by the loss (see the sketch after this class)
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
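

# Hedged standalone illustration (not from the original test) of the pad-to--100
# label masking used in _map_to_encoder_decoder_inputs above.
_pad_id = 0
_labels = [[5, 7, _pad_id, _pad_id]]
_masked = [[-100 if token == _pad_id else token for token in seq] for seq in _labels]
assert _masked == [[5, 7, -100, -100]]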
| 183 | 1 |
def sum_digits(num: int) -> int:
    """Return the sum of the decimal digits of num."""
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    """Digit sum of the numerator of the max_n-th convergent of the continued
    fraction for e (Project Euler problem 65)."""
    pre_numerator = 1
    cur_numerator = 2

    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp

    return sum_digits(cur_numerator)
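

# Hedged spot-checks: digits of 1234 sum to 10, and the Project Euler 65
# statement gives the 10th convergent of e as 1457/536 (digit sum 17).
assert sum_digits(1234) == 10
assert solution(10) == 17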
if __name__ == "__main__":
print(f"{solution() = }") | 26 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]


class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset, repeating each prompt `n_copies`
    times so that batched sampling yields several candidates per task."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }


class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks if all generated functions in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)


def remove_last_block(string):
    """Remove the last block of the code containing EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple candidate completions per task, using accelerate to distribute work across devices."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )

            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens


def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    generations = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)


# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
| 26 | 1 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """Helper function to read raw audio data from an encoded byte string using ffmpeg."""
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio


def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """Helper function to read audio from the microphone via ffmpeg, yielding raw byte chunks."""
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item


def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """Like `ffmpeg_microphone`, but yields overlapping numpy chunks with stride
    information, suitable for streaming inference."""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item


def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """Reads raw bytes from an iterator and cuts them into chunks of length
    `chunk_len`, attaching `stride` information and optionally streaming
    partial chunks as they arrive."""
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item


def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Internal function to create the generator of data through ffmpeg."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
| 62 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_m2m_100""": ["""M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP""", """M2M100Config""", """M2M100OnnxConfig"""],
"""tokenization_m2m_100""": ["""M2M100Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
"""M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""M2M100ForConditionalGeneration""",
"""M2M100Model""",
"""M2M100PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 349 | 0 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    """Rename the DialoGPT lm-head key and save the state dict under the standard weights filename."""
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
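
# Quick self-contained check of the conversion (synthetic tensors, no real
# DialoGPT checkpoint needed):
#
#     import tempfile
#     with tempfile.TemporaryDirectory() as tmp:
#         src = os.path.join(tmp, "small_ft.pkl")
#         torch.save({OLD_KEY: torch.zeros(2, 2)}, src)
#         convert_dialogpt_checkpoint(src, tmp)
#         assert NEW_KEY in torch.load(os.path.join(tmp, WEIGHTS_NAME))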
| 46 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    """Configuration class to store the configuration of a UniSpeechSat model."""

    model_type = "unispeech-sat"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        """Total stride of the feature extractor, i.e. audio samples per output frame."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
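
# The property above multiplies the conv strides: 5*2*2*2*2*2*2 = 320, i.e. with
# the default feature extractor one encoder frame covers 320 input samples
# (20 ms of 16 kHz audio). Quick check:
#
#     config = UniSpeechSatConfig()
#     assert config.inputs_to_logits_ratio == 320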
| 46 | 1 |
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
"""simple docstring"""
assert _test_patching.open is open
    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def test_patch_submodule_missing():
"""simple docstring"""
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
pass
def test_patch_submodule_missing_builtin():
"""simple docstring"""
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
"""simple docstring"""
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def test_patch_submodule_successive():
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
"""simple docstring"""
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
| 244 | '''simple docstring'''
def and_gate(input_1: int, input_2: int) -> int:
    """Return 1 only if both inputs are 1, else 0 (logical AND)."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    """Check the full two-input truth table."""
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 244 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k',
'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v',
'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q',
'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u',
'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v',
'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out',
'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos',
'self_attn.rotary_emb': 'encoder.embed_positions',
'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm',
'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1',
'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2',
'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv',
'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm',
'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm',
'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense',
'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense',
'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm',
'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense',
'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense',
'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # Walk the dotted `key` down the HF model to reach the target submodule/parameter.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Copy/paste/tweak the fairseq model's weights into the transformers design."""
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 387 | import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    """Builds a tiny hybrid DPT config and random inputs for the common model tests."""

    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }

        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as DPT does not use input_ids, inputs_embeds,
    attention_mask and seq_length.
    """

    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
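
        # A common follow-up after the assertion above is to resize the raw depth
        # map back to the input resolution for visualization; this mirrors standard
        # DPT post-processing but is illustrative and not part of the test:
        #
        #     prediction = torch.nn.functional.interpolate(
        #         predicted_depth.unsqueeze(1), size=image.size[::-1], mode="bicubic", align_corners=False
        #     ).squeeze()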
| 387 | 1 |
def z_function(input_str: str) -> list[int]:
    """
    For each index, compute the length of the longest substring starting there
    that is also a prefix of the whole string (index 0 is left as 0).

    >>> z_function("abracadabra")
    [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]
    """
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    """Check whether the match starting at index i can be extended by one character."""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    """
    Count occurrences of `pattern` in `input_str` using the Z-function.

    >>> find_pattern("abr", "abracadabra")
    2
    """
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index is the starting position of a substring
        # equal to the pattern string
        if val >= len(pattern):
            answer += 1

    return answer


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 60 |
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3).

    >>> solution(13195)
    29
    """
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1

    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
| 242 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
| 715 |
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def a ( UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : List[Any] ) -> Optional[int]:
snake_case__ =tmp_path / 'cache'
snake_case__ ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
snake_case__ =features.copy() if features else default_expected_features
snake_case__ =(
Features({feature: Value(UpperCamelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case__ =JsonDatasetReader(UpperCamelCase_ , features=UpperCamelCase_ , cache_dir=UpperCamelCase_ ).read()
_check_json_dataset(UpperCamelCase_ , UpperCamelCase_ )
@pytest.mark.parametrize(
'features' , [
None,
{'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'},
] , )
def a ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any] ) -> List[str]:
snake_case__ =tmp_path / 'cache'
snake_case__ ={'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}
snake_case__ =features.copy() if features else default_expected_features
snake_case__ =(
Features({feature: Value(UpperCamelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case__ =JsonDatasetReader(UpperCamelCase_ , features=UpperCamelCase_ , cache_dir=UpperCamelCase_ ).read()
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def a ( UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] ) -> str:
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
snake_case__ ={'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'}
snake_case__ =features.copy()
snake_case__ =(
Features({feature: Value(UpperCamelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case__ =tmp_path / 'cache'
snake_case__ =JsonDatasetReader(UpperCamelCase_ , features=UpperCamelCase_ , cache_dir=UpperCamelCase_ ).read()
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def a ( UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any] ) -> Any:
snake_case__ =tmp_path / 'cache'
snake_case__ ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
snake_case__ =JsonDatasetReader(UpperCamelCase_ , cache_dir=UpperCamelCase_ , split=UpperCamelCase_ ).read()
_check_json_dataset(UpperCamelCase_ , UpperCamelCase_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def a ( UpperCamelCase_ : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[Any] ) -> str:
if issubclass(UpperCamelCase_ , UpperCamelCase_ ):
snake_case__ =jsonl_path
elif issubclass(UpperCamelCase_ , UpperCamelCase_ ):
snake_case__ =[jsonl_path]
snake_case__ =tmp_path / 'cache'
snake_case__ ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
snake_case__ =JsonDatasetReader(UpperCamelCase_ , cache_dir=UpperCamelCase_ ).read()
_check_json_dataset(UpperCamelCase_ , UpperCamelCase_ )
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def a ( UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : str ) -> List[Any]:
snake_case__ =tmp_path / 'cache'
snake_case__ ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case__ =JsonDatasetReader({'train': jsonl_path} , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ ).read()
_check_json_datasetdict(UpperCamelCase_ , UpperCamelCase_ )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def a ( UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[Any] ) -> Optional[int]:
snake_case__ =tmp_path / 'cache'
snake_case__ ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
snake_case__ =features.copy() if features else default_expected_features
snake_case__ =(
Features({feature: Value(UpperCamelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case__ =JsonDatasetReader({'train': jsonl_path} , features=UpperCamelCase_ , cache_dir=UpperCamelCase_ ).read()
_check_json_datasetdict(UpperCamelCase_ , UpperCamelCase_ )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def a ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Any ) -> Any:
if split:
snake_case__ ={split: jsonl_path}
else:
snake_case__ ='train'
snake_case__ ={'train': jsonl_path, 'test': jsonl_path}
snake_case__ =tmp_path / 'cache'
snake_case__ ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
snake_case__ =JsonDatasetReader(UpperCamelCase_ , cache_dir=UpperCamelCase_ ).read()
_check_json_datasetdict(UpperCamelCase_ , UpperCamelCase_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]
class a__:
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> str:
with io.BytesIO() as buffer:
JsonDatasetWriter(_UpperCAmelCase , _UpperCAmelCase , lines=_UpperCAmelCase ).write()
buffer.seek(0 )
snake_case__ =load_json_function(_UpperCAmelCase )
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
assert isinstance(exported_content[0] , _UpperCAmelCase )
assert len(_UpperCAmelCase ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Dict:
with io.BytesIO() as buffer:
JsonDatasetWriter(_UpperCAmelCase , _UpperCAmelCase , lines=_UpperCAmelCase , orient=_UpperCAmelCase ).write()
buffer.seek(0 )
snake_case__ =load_json(_UpperCAmelCase )
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_UpperCAmelCase , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_UpperCAmelCase ) == 10
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple:
with io.BytesIO() as buffer:
JsonDatasetWriter(_UpperCAmelCase , _UpperCAmelCase , lines=_UpperCAmelCase , num_proc=2 ).write()
buffer.seek(0 )
snake_case__ =load_json_function(_UpperCAmelCase )
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
assert isinstance(exported_content[0] , _UpperCAmelCase )
assert len(_UpperCAmelCase ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Dict:
with io.BytesIO() as buffer:
JsonDatasetWriter(_UpperCAmelCase , _UpperCAmelCase , lines=_UpperCAmelCase , orient=_UpperCAmelCase , num_proc=2 ).write()
buffer.seek(0 )
snake_case__ =load_json(_UpperCAmelCase )
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_UpperCAmelCase , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_UpperCAmelCase ) == 10
def _lowercase ( self , _UpperCAmelCase ) -> List[str]:
with pytest.raises(_UpperCAmelCase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_UpperCAmelCase , _UpperCAmelCase , num_proc=0 )
@pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
snake_case__ =tmp_path_factory.mktemp('data' ) / f"""test.json.{extension}"""
snake_case__ =str(shared_datadir / f"""test_file.json.{extension}""" )
JsonDatasetWriter(_UpperCAmelCase , _UpperCAmelCase , compression=_UpperCAmelCase ).write()
with fsspec.open(_UpperCAmelCase , 'rb' , compression='infer' ) as f:
snake_case__ =f.read()
with fsspec.open(_UpperCAmelCase , 'rb' , compression='infer' ) as f:
snake_case__ =f.read()
assert exported_content == original_content
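
# A compact round-trip through the same reader/writer API exercised above
# (buffer-based, mirroring the fixtures' three-column schema):
#
#     import io
#     from datasets import Dataset
#     ds = Dataset.from_dict({"col_1": ["a"], "col_2": [1], "col_3": [1.0]})
#     buf = io.BytesIO()
#     JsonDatasetWriter(ds, buf, lines=True).write()
#     buf.seek(0)
#     reloaded = [json.loads(line) for line in buf]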
| 581 | 0 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    """Builds a tiny LayoutLMv3 config plus random text, bbox, and image inputs for the common tests."""
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the [CLS] token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
SCREAMING_SNAKE_CASE = bbox[i, j, 3]
SCREAMING_SNAKE_CASE = bbox[i, j, 1]
SCREAMING_SNAKE_CASE = t
if bbox[i, j, 2] < bbox[i, j, 0]:
SCREAMING_SNAKE_CASE = bbox[i, j, 2]
SCREAMING_SNAKE_CASE = bbox[i, j, 0]
SCREAMING_SNAKE_CASE = t
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.text_seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _snake_case ( self : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE = LayoutLMvaModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
# text + image
SCREAMING_SNAKE_CASE = model(__lowerCamelCase , pixel_values=__lowerCamelCase )
SCREAMING_SNAKE_CASE = model(
__lowerCamelCase , bbox=__lowerCamelCase , pixel_values=__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase )
SCREAMING_SNAKE_CASE = model(__lowerCamelCase , bbox=__lowerCamelCase , pixel_values=__lowerCamelCase , token_type_ids=__lowerCamelCase )
SCREAMING_SNAKE_CASE = model(__lowerCamelCase , bbox=__lowerCamelCase , pixel_values=__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
SCREAMING_SNAKE_CASE = model(pixel_values=__lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _snake_case ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : List[Any] , __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = LayoutLMvaForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(
__lowerCamelCase , bbox=__lowerCamelCase , pixel_values=__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : str ):
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = LayoutLMvaForTokenClassification(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(
__lowerCamelCase , bbox=__lowerCamelCase , pixel_values=__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _snake_case ( self : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any ):
SCREAMING_SNAKE_CASE = LayoutLMvaForQuestionAnswering(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(
__lowerCamelCase , bbox=__lowerCamelCase , pixel_values=__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCamelCase__ = (
{"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
if is_torch_available()
else {}
)
def _snake_case ( self : Any , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] ):
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def _snake_case ( self : str ):
SCREAMING_SNAKE_CASE = LayoutLMvaModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 )
def _snake_case ( self : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : List[Any]=False ):
SCREAMING_SNAKE_CASE = copy.deepcopy(__lowerCamelCase )
if model_class in get_values(__lowerCamelCase ):
SCREAMING_SNAKE_CASE = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(__lowerCamelCase , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(__lowerCamelCase ):
SCREAMING_SNAKE_CASE = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase )
elif model_class in get_values(__lowerCamelCase ):
SCREAMING_SNAKE_CASE = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase )
SCREAMING_SNAKE_CASE = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase )
elif model_class in [
*get_values(__lowerCamelCase ),
]:
SCREAMING_SNAKE_CASE = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase )
elif model_class in [
*get_values(__lowerCamelCase ),
]:
SCREAMING_SNAKE_CASE = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=__lowerCamelCase , )
return inputs_dict
def _snake_case ( self : Any ):
self.config_tester.run_common_tests()
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _snake_case ( self : Optional[int] ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCamelCase )
def _snake_case ( self : Any ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCamelCase )
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCamelCase )
@slow
def _snake_case ( self : Optional[Any] ):
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = LayoutLMvaModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def __a ( ):
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _snake_case ( self : List[Any] ):
return LayoutLMvaImageProcessor(apply_ocr=__lowerCamelCase ) if is_vision_available() else None
@slow
def _snake_case ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=__lowerCamelCase , return_tensors="pt" ).pixel_values.to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([[1, 2]] )
SCREAMING_SNAKE_CASE = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
SCREAMING_SNAKE_CASE = model(
input_ids=input_ids.to(__lowerCamelCase ) , bbox=bbox.to(__lowerCamelCase ) , pixel_values=pixel_values.to(__lowerCamelCase ) , )
# verify the logits
SCREAMING_SNAKE_CASE = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor(
[[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __lowerCamelCase , atol=1e-4 ) ) | 16 |
from typing import Any
class __lowercase :
def __init__( self , lowercase_) -> str:
__snake_case = data
__snake_case = None
def __repr__( self) -> str:
return F"Node({self.data})"
class __lowercase :
def __init__( self) -> Dict:
__snake_case = None
def __iter__( self) -> Any:
__snake_case = self.head
while node:
yield node.data
__snake_case = node.next
def __len__( self) -> int:
return sum(1 for _ in self)
def __repr__( self) -> str:
return "->".join([str(lowercase_) for item in self])
def __getitem__( self , lowercase_) -> Any:
if not 0 <= index < len(self):
raise ValueError('list index out of range.')
for i, node in enumerate(self):
if i == index:
return node
return None
def __setitem__( self , lowercase_ , lowercase_) -> None:
if not 0 <= index < len(self):
raise ValueError('list index out of range.')
__snake_case = self.head
for _ in range(lowercase_):
__snake_case = current.next
__snake_case = data
def _a ( self , lowercase_) -> None:
self.insert_nth(len(self) , lowercase_)
def _a ( self , lowercase_) -> None:
self.insert_nth(0 , lowercase_)
def _a ( self , lowercase_ , lowercase_) -> None:
if not 0 <= index <= len(self):
raise IndexError('list index out of range')
__snake_case = Node(lowercase_)
if self.head is None:
__snake_case = new_node
elif index == 0:
__snake_case = self.head # link new_node to head
__snake_case = new_node
else:
__snake_case = self.head
for _ in range(index - 1):
__snake_case = temp.next
__snake_case = temp.next
__snake_case = new_node
def _a ( self) -> None: # print every node data
print(self)
def _a ( self) -> Any:
return self.delete_nth(0)
def _a ( self) -> Any: # delete from tail
return self.delete_nth(len(self) - 1)
def _a ( self , lowercase_ = 0) -> Any:
if not 0 <= index <= len(self) - 1: # test if index is valid
raise IndexError('List index out of range.')
__snake_case = self.head # default first node
if index == 0:
__snake_case = self.head.next
else:
__snake_case = self.head
for _ in range(index - 1):
__snake_case = temp.next
__snake_case = temp.next
__snake_case = temp.next.next
return delete_node.data
def _a ( self) -> bool:
return self.head is None
def _a ( self) -> None:
__snake_case = None
__snake_case = self.head
while current:
# Store the current node's next node.
__snake_case = current.next
# Make the current node's next point backwards
__snake_case = prev
# Make the previous node be the current node
__snake_case = current
# Make the current node the next node (to progress iteration)
__snake_case = next_node
# Return prev in order to put the head at the end
__snake_case = prev
def A ( ) -> None:
'''simple docstring'''
__snake_case = LinkedList()
assert linked_list.is_empty() is True
assert str(snake_case__ ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(snake_case__ ) == i
linked_list.insert_nth(snake_case__ , i + 1 )
assert str(snake_case__ ) == "->".join(str(snake_case__ ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(snake_case__ ) == "->".join(str(snake_case__ ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(snake_case__ ) == 9
assert str(snake_case__ ) == "->".join(str(snake_case__ ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
__snake_case = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(snake_case__ ) == "->".join(str(snake_case__ ) for i in range(-8 , 1 ) )
def A ( ) -> None:
'''simple docstring'''
__snake_case = [
-9,
100,
Node(7734_5112 ),
'dlrow olleH',
7,
5555,
0,
-192.55_555,
'Hello, world!',
77.9,
Node(10 ),
None,
None,
12.20,
]
__snake_case = LinkedList()
for i in test_input:
linked_list.insert_tail(snake_case__ )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(snake_case__ ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
__snake_case = linked_list.delete_head()
assert result == -9
assert (
str(snake_case__ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
__snake_case = linked_list.delete_tail()
assert result == 12.2
assert (
str(snake_case__ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
__snake_case = linked_list.delete_nth(10 )
assert result is None
assert (
str(snake_case__ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(snake_case__ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(snake_case__ )
assert (
str(snake_case__ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(snake_case__ )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def A ( ) -> str:
'''simple docstring'''
from doctest import testmod
testmod()
__snake_case = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(snake_case__ )
print('\nReading/changing Node data using indexing:' )
print(f"Element at Position 1: {linked_list[1]}" )
__snake_case = input('Enter New Value: ' ).strip()
print('New list:' )
print(snake_case__ )
print(f"length of linked_list is : {len(snake_case__ )}" )
if __name__ == "__main__":
main()
| 313 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Generic, TypeVar
__lowerCAmelCase : Optional[Any] = TypeVar('''T''')
class _lowerCAmelCase ( Generic[T] ):
"""simple docstring"""
def __init__( self , _lowercase ) -> Tuple:
'''simple docstring'''
snake_case_ : int = data
snake_case_ : Optional[int] = self
snake_case_ : int = 0
class _lowerCAmelCase ( Generic[T] ):
"""simple docstring"""
def __init__( self ) -> int:
'''simple docstring'''
snake_case_ : dict[T, DisjointSetTreeNode[T]] = {}
def UpperCAmelCase__ ( self , _lowercase ) -> Any:
'''simple docstring'''
snake_case_ : List[str] = DisjointSetTreeNode(A__ )
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Any = self.map[data]
if elem_ref != elem_ref.parent:
snake_case_ : str = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Any:
'''simple docstring'''
if nodea.rank > nodea.rank:
snake_case_ : Tuple = nodea
else:
snake_case_ : str = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
self.link(self.find_set(A__ ) , self.find_set(A__ ) )
class _lowerCAmelCase ( Generic[T] ):
"""simple docstring"""
def __init__( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : dict[T, dict[T, int]] = {}
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[Any]:
'''simple docstring'''
if node not in self.connections:
snake_case_ : Dict = {}
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> Any:
'''simple docstring'''
self.add_node(A__ )
self.add_node(A__ )
snake_case_ : int = weight
snake_case_ : int = weight
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : Any = []
snake_case_ : str = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
edges.sort(key=lambda _lowercase : x[2] )
# creating the disjoint set
snake_case_ : List[Any] = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(A__ )
# MST generation
snake_case_ : Union[str, Any] = 0
snake_case_ : int = 0
snake_case_ : Dict = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
snake_case_ : Union[str, Any] = edges[index]
index += 1
snake_case_ : Tuple = disjoint_set.find_set(A__ )
snake_case_ : Union[str, Any] = disjoint_set.find_set(A__ )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(A__ , A__ , A__ )
disjoint_set.union(A__ , A__ )
return graph
| 719 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=1_3 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=9_9 , _lowercase=3_2 , _lowercase=5 , _lowercase=4 , _lowercase=3_7 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=5_1_2 , _lowercase=1_6 , _lowercase=2 , _lowercase=0.02 , _lowercase=4 , ) -> List[str]:
'''simple docstring'''
snake_case_ : Tuple = parent
snake_case_ : List[str] = batch_size
snake_case_ : int = seq_length
snake_case_ : List[Any] = is_training
snake_case_ : Optional[int] = use_attention_mask
snake_case_ : Optional[Any] = use_token_type_ids
snake_case_ : Union[str, Any] = use_labels
snake_case_ : str = vocab_size
snake_case_ : List[str] = hidden_size
snake_case_ : List[str] = num_hidden_layers
snake_case_ : int = num_attention_heads
snake_case_ : Dict = intermediate_size
snake_case_ : Union[str, Any] = hidden_act
snake_case_ : List[str] = hidden_dropout_prob
snake_case_ : Any = attention_probs_dropout_prob
snake_case_ : List[str] = max_position_embeddings
snake_case_ : Union[str, Any] = type_vocab_size
snake_case_ : str = type_sequence_label_size
snake_case_ : Dict = initializer_range
snake_case_ : str = num_choices
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Tuple = None
if self.use_attention_mask:
snake_case_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : Optional[int] = None
if self.use_token_type_ids:
snake_case_ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : Union[str, Any] = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : str = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[Any] = config_and_inputs
snake_case_ : Tuple = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = True
_lowerCamelCase = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Dict = FlaxRoFormerModelTester(self )
@slow
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
snake_case_ : Tuple = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=_lowercase )
snake_case_ : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowercase )
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
snake_case_ : Tuple = jnp.array([[0, 1, 2, 3, 4, 5]] )
snake_case_ : Dict = model(_lowercase )[0]
snake_case_ : Optional[int] = 5_0_0_0_0
snake_case_ : Union[str, Any] = (1, 6, vocab_size)
self.assertEqual(output.shape , _lowercase )
snake_case_ : Dict = jnp.array(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , _lowercase , atol=1E-4 ) )
| 21 | 0 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _SCREAMING_SNAKE_CASE ( snake_case ):
lowerCamelCase_ = ['image_processor', 'tokenizer']
lowerCamelCase_ = 'ViltImageProcessor'
lowerCamelCase_ = ('BertTokenizer', 'BertTokenizerFast')
def __init__( self : Tuple , snake_case_ : int=None , snake_case_ : Optional[int]=None , **snake_case_ : Union[str, Any] ):
"""simple docstring"""
A : Tuple = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , snake_case_ , )
A : str = kwargs.pop('''feature_extractor''' )
A : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(snake_case_ , snake_case_ )
A : Optional[Any] = self.image_processor
def __call__( self : Union[str, Any] , snake_case_ : Optional[Any] , snake_case_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , snake_case_ : bool = True , snake_case_ : Union[bool, str, PaddingStrategy] = False , snake_case_ : Union[bool, str, TruncationStrategy] = None , snake_case_ : Optional[int] = None , snake_case_ : int = 0 , snake_case_ : Optional[int] = None , snake_case_ : Optional[bool] = None , snake_case_ : Optional[bool] = None , snake_case_ : bool = False , snake_case_ : bool = False , snake_case_ : bool = False , snake_case_ : bool = False , snake_case_ : bool = True , snake_case_ : Optional[Union[str, TensorType]] = None , **snake_case_ : List[Any] , ):
"""simple docstring"""
A : Any = self.tokenizer(
text=snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , stride=snake_case_ , pad_to_multiple_of=snake_case_ , return_token_type_ids=snake_case_ , return_attention_mask=snake_case_ , return_overflowing_tokens=snake_case_ , return_special_tokens_mask=snake_case_ , return_offsets_mapping=snake_case_ , return_length=snake_case_ , verbose=snake_case_ , return_tensors=snake_case_ , **snake_case_ , )
# add pixel_values + pixel_mask
A : Optional[Any] = self.image_processor(snake_case_ , return_tensors=snake_case_ )
encoding.update(snake_case_ )
return encoding
def _UpperCAmelCase ( self : Any , *snake_case_ : Any , **snake_case_ : Tuple ):
"""simple docstring"""
return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
def _UpperCAmelCase ( self : Tuple , *snake_case_ : Union[str, Any] , **snake_case_ : str ):
"""simple docstring"""
return self.tokenizer.decode(*snake_case_ , **snake_case_ )
@property
def _UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
A : Optional[int] = self.tokenizer.model_input_names
A : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , snake_case_ , )
return self.image_processor_class
@property
def _UpperCAmelCase ( self : str ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , snake_case_ , )
return self.image_processor | 256 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class _SCREAMING_SNAKE_CASE :
lowerCamelCase_ = 42
lowerCamelCase_ = 42
class _SCREAMING_SNAKE_CASE :
def __init__( self : List[str] , snake_case_ : int ):
"""simple docstring"""
A : list[list[Edge]] = [[] for _ in range(snake_case_ )]
A : Union[str, Any] = size
def __getitem__( self : Any , snake_case_ : int ):
"""simple docstring"""
return iter(self._graph[vertex] )
@property
def _UpperCAmelCase ( self : str ):
"""simple docstring"""
return self._size
def _UpperCAmelCase ( self : Tuple , snake_case_ : int , snake_case_ : int , snake_case_ : int ):
"""simple docstring"""
if weight not in (0, 1):
raise ValueError('''Edge weight must be either 0 or 1.''' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('''Vertex indexes must be in [0; size).''' )
self._graph[from_vertex].append(Edge(snake_case_ , snake_case_ ) )
def _UpperCAmelCase ( self : str , snake_case_ : int , snake_case_ : int ):
"""simple docstring"""
A : Tuple = deque([start_vertex] )
A : list[int | None] = [None] * self.size
A : str = 0
while queue:
A : Optional[Any] = queue.popleft()
A : List[str] = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
A : Tuple = current_distance + edge.weight
A : Dict = distances[edge.destination_vertex]
if (
isinstance(snake_case_ , snake_case_ )
and new_distance >= dest_vertex_distance
):
continue
A : Optional[int] = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('''No path from start_vertex to finish_vertex.''' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod() | 256 | 1 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def A_ ( snake_case : int ) -> int:
'''simple docstring'''
def is_in_circle(snake_case : float , snake_case : float ) -> bool:
__UpperCamelCase = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
__UpperCamelCase = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(snake_case ) )
# The ratio of the area for circle to square is pi/4.
__UpperCamelCase = proportion * 4
print(f"The estimated value of pi is {pi_estimate}" )
print(f"The numpy value of pi is {pi}" )
print(f"The total error is {abs(pi - pi_estimate )}" )
def A_ ( snake_case : int , snake_case : Callable[[float], float] , snake_case : float = 0.0 , snake_case : float = 1.0 , ) -> float:
'''simple docstring'''
return mean(
function_to_integrate(uniform(snake_case , snake_case ) ) for _ in range(snake_case ) ) * (max_value - min_value)
def A_ ( snake_case : int , snake_case : float = 0.0 , snake_case : float = 1.0 ) -> None:
'''simple docstring'''
def identity_function(snake_case : float ) -> float:
return x
__UpperCamelCase = area_under_curve_estimator(
snake_case , snake_case , snake_case , snake_case )
__UpperCamelCase = (max_value * max_value - min_value * min_value) / 2
print('''******************''' )
print(f"Estimating area under y=x where x varies from {min_value} to {max_value}" )
print(f"Estimated value is {estimated_value}" )
print(f"Expected value is {expected_value}" )
print(f"Total error is {abs(estimated_value - expected_value )}" )
print('''******************''' )
def A_ ( snake_case : int ) -> None:
'''simple docstring'''
def function_to_integrate(snake_case : float ) -> float:
return sqrt(4.0 - x * x )
__UpperCamelCase = area_under_curve_estimator(
snake_case , snake_case , 0.0 , 2.0 )
print('''******************''' )
print('''Estimating pi using area_under_curve_estimator''' )
print(f"Estimated value is {estimated_value}" )
print(f"Expected value is {pi}" )
print(f"Total error is {abs(estimated_value - pi )}" )
print('''******************''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 451 |
from collections import Counter
from timeit import timeit
def A_ ( snake_case : str = "" , ) -> bool:
'''simple docstring'''
return sum(c % 2 for c in Counter(input_str.replace(''' ''' , '''''' ).lower() ).values() ) < 2
def A_ ( snake_case : str = "" ) -> bool:
'''simple docstring'''
if len(snake_case ) == 0:
return True
__UpperCamelCase = input_str.replace(''' ''' , '''''' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
__UpperCamelCase = {}
for character in lower_case_input_str:
__UpperCamelCase = character_freq_dict.get(snake_case , 0 ) + 1
__UpperCamelCase = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def A_ ( snake_case : str = "" ) -> None:
'''simple docstring'''
print('''\nFor string = ''' , snake_case , ''':''' )
print(
'''> can_string_be_rearranged_as_palindrome_counter()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome_counter(snake_case ) , '''\ttime =''' , timeit(
'''z.can_string_be_rearranged_as_palindrome_counter(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
print(
'''> can_string_be_rearranged_as_palindrome()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome(snake_case ) , '''\ttime =''' , timeit(
'''z.can_string_be_rearranged_as_palindrome(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
if __name__ == "__main__":
lowercase__ : Tuple = input(
"Enter string to determine if it can be rearranged as a palindrome or not: "
).strip()
benchmark(check_str)
lowercase__ : Dict = can_string_be_rearranged_as_palindrome_counter(check_str)
print(F"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
| 451 | 1 |
import string
from math import logaa
def _lowerCAmelCase ( UpperCamelCase__: str , UpperCamelCase__: str ) -> int:
"""simple docstring"""
A = document.translate(
str.maketrans("""""" , """""" , string.punctuation ) ).replace("""\n""" , """""" )
A = document_without_punctuation.split(""" """ ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def _lowerCAmelCase ( UpperCamelCase__: str , UpperCamelCase__: str ) -> tuple[int, int]:
"""simple docstring"""
A = corpus.lower().translate(
str.maketrans("""""" , """""" , string.punctuation ) ) # strip all punctuation and replace it with ''
A = corpus_without_punctuation.split("""\n""" )
A = term.lower()
return (len([doc for doc in docs if term in doc] ), len(UpperCamelCase__ ))
def _lowerCAmelCase ( UpperCamelCase__: int , UpperCamelCase__: int , UpperCamelCase__: Optional[Any]=False ) -> float:
"""simple docstring"""
if smoothing:
if n == 0:
raise ValueError("""log10(0) is undefined.""" )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError("""df must be > 0""" )
elif n == 0:
raise ValueError("""log10(0) is undefined.""" )
return round(logaa(n / df ) , 3 )
def _lowerCAmelCase ( UpperCamelCase__: int , UpperCamelCase__: int ) -> float:
"""simple docstring"""
return round(tf * idf , 3 )
| 641 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def _lowerCAmelCase ( UpperCamelCase__: Optional[Any] ) -> Optional[int]:
"""simple docstring"""
A = 3_84
if "tiny" in model_name:
A = [3, 3, 9, 3]
A = [96, 1_92, 3_84, 7_68]
if "small" in model_name:
A = [3, 3, 27, 3]
A = [96, 1_92, 3_84, 7_68]
if "base" in model_name:
A = [3, 3, 27, 3]
A = [1_28, 2_56, 5_12, 10_24]
A = 5_12
if "large" in model_name:
A = [3, 3, 27, 3]
A = [1_92, 3_84, 7_68, 15_36]
A = 7_68
if "xlarge" in model_name:
A = [3, 3, 27, 3]
A = [2_56, 5_12, 10_24, 20_48]
A = 10_24
# set label information
A = 1_50
A = """huggingface/label-files"""
A = """ade20k-id2label.json"""
A = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) )
A = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
A = {v: k for k, v in idalabel.items()}
A = ConvNextConfig(
depths=UpperCamelCase__ , hidden_sizes=UpperCamelCase__ , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
A = UperNetConfig(
backbone_config=UpperCamelCase__ , auxiliary_in_channels=UpperCamelCase__ , num_labels=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ , )
return config
def _lowerCAmelCase ( UpperCamelCase__: int ) -> Dict:
"""simple docstring"""
A = []
# fmt: off
# stem
rename_keys.append(("""backbone.downsample_layers.0.0.weight""", """backbone.embeddings.patch_embeddings.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.0.bias""", """backbone.embeddings.patch_embeddings.bias""") )
rename_keys.append(("""backbone.downsample_layers.0.1.weight""", """backbone.embeddings.layernorm.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.1.bias""", """backbone.embeddings.layernorm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.stages.{i}.{j}.gamma', f'backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter') )
rename_keys.append((f'backbone.stages.{i}.{j}.depthwise_conv.weight', f'backbone.encoder.stages.{i}.layers.{j}.dwconv.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.depthwise_conv.bias', f'backbone.encoder.stages.{i}.layers.{j}.dwconv.bias') )
rename_keys.append((f'backbone.stages.{i}.{j}.norm.weight', f'backbone.encoder.stages.{i}.layers.{j}.layernorm.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.norm.bias', f'backbone.encoder.stages.{i}.layers.{j}.layernorm.bias') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv1.weight', f'backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv1.bias', f'backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv2.weight', f'backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv2.bias', f'backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias') )
if i > 0:
rename_keys.append((f'backbone.downsample_layers.{i}.0.weight', f'backbone.encoder.stages.{i}.downsampling_layer.0.weight') )
rename_keys.append((f'backbone.downsample_layers.{i}.0.bias', f'backbone.encoder.stages.{i}.downsampling_layer.0.bias') )
rename_keys.append((f'backbone.downsample_layers.{i}.1.weight', f'backbone.encoder.stages.{i}.downsampling_layer.1.weight') )
rename_keys.append((f'backbone.downsample_layers.{i}.1.bias', f'backbone.encoder.stages.{i}.downsampling_layer.1.bias') )
rename_keys.append((f'backbone.norm{i}.weight', f'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((f'backbone.norm{i}.bias', f'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def _lowerCAmelCase ( UpperCamelCase__: Tuple , UpperCamelCase__: List[str] , UpperCamelCase__: int ) -> Optional[Any]:
"""simple docstring"""
A = dct.pop(UpperCamelCase__ )
A = val
def _lowerCAmelCase ( UpperCamelCase__: Optional[int] , UpperCamelCase__: Any , UpperCamelCase__: int ) -> List[Any]:
"""simple docstring"""
A = {
"""upernet-convnext-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth""",
"""upernet-convnext-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth""",
"""upernet-convnext-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth""",
"""upernet-convnext-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth""",
"""upernet-convnext-xlarge""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth""",
}
A = model_name_to_url[model_name]
A = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location="""cpu""" )["""state_dict"""]
A = get_upernet_config(UpperCamelCase__ )
A = UperNetForSemanticSegmentation(UpperCamelCase__ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
A = state_dict.pop(UpperCamelCase__ )
if "bn" in key:
A = key.replace("""bn""" , """batch_norm""" )
A = val
# rename keys
A = create_rename_keys(UpperCamelCase__ )
for src, dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
# verify on image
A = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
A = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ).convert("""RGB""" )
A = SegformerImageProcessor()
A = processor(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
with torch.no_grad():
A = model(UpperCamelCase__ )
if model_name == "upernet-convnext-tiny":
A = torch.tensor(
[[-8.81_10, -8.81_10, -8.65_21], [-8.81_10, -8.81_10, -8.65_21], [-8.77_46, -8.77_46, -8.61_30]] )
elif model_name == "upernet-convnext-small":
A = torch.tensor(
[[-8.82_36, -8.82_36, -8.67_71], [-8.82_36, -8.82_36, -8.67_71], [-8.76_38, -8.76_38, -8.62_40]] )
elif model_name == "upernet-convnext-base":
A = torch.tensor(
[[-8.85_58, -8.85_58, -8.69_05], [-8.85_58, -8.85_58, -8.69_05], [-8.76_69, -8.76_69, -8.60_21]] )
elif model_name == "upernet-convnext-large":
A = torch.tensor(
[[-8.66_60, -8.66_60, -8.62_10], [-8.66_60, -8.66_60, -8.62_10], [-8.63_10, -8.63_10, -8.59_64]] )
elif model_name == "upernet-convnext-xlarge":
A = torch.tensor(
[[-8.49_80, -8.49_80, -8.39_77], [-8.49_80, -8.49_80, -8.39_77], [-8.43_79, -8.43_79, -8.34_12]] )
print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCamelCase__ , atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(UpperCamelCase__ )
print(f'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(UpperCamelCase__ )
if push_to_hub:
print(f'Pushing model and processor for {model_name} to hub' )
model.push_to_hub(f'openmmlab/{model_name}' )
processor.push_to_hub(f'openmmlab/{model_name}' )
if __name__ == "__main__":
_lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-convnext-tiny",
type=str,
choices=[f'''upernet-convnext-{size}''' for size in ["tiny", "small", "base", "large", "xlarge"]],
help="Name of the ConvNext UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_lowercase : str = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 641 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _A ( __lowerCamelCase , unittest.TestCase ):
__a = UnCLIPImageVariationPipeline
__a = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
__a = IMAGE_VARIATION_BATCH_PARAMS
__a = [
'generator',
'return_dict',
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
__a = False
@property
def _lowerCamelCase ( self ) -> Optional[Any]:
return 32
@property
def _lowerCamelCase ( self ) -> Tuple:
return 32
@property
def _lowerCamelCase ( self ) -> Optional[int]:
return self.time_input_dim
@property
def _lowerCamelCase ( self ) -> Dict:
return self.time_input_dim * 4
@property
def _lowerCamelCase ( self ) -> Optional[int]:
return 100
@property
def _lowerCamelCase ( self ) -> Union[str, Any]:
lowerCamelCase__ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def _lowerCamelCase ( self ) -> Dict:
torch.manual_seed(0 )
lowerCamelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(UpperCAmelCase_ )
@property
def _lowerCamelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
lowerCamelCase__ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(UpperCAmelCase_ )
@property
def _lowerCamelCase ( self ) -> Dict:
torch.manual_seed(0 )
lowerCamelCase__ = {
'clip_embeddings_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'cross_attention_dim': self.cross_attention_dim,
}
lowerCamelCase__ = UnCLIPTextProjModel(**UpperCAmelCase_ )
return model
@property
def _lowerCamelCase ( self ) -> Optional[Any]:
torch.manual_seed(0 )
lowerCamelCase__ = {
'sample_size': 32,
# RGB in channels
'in_channels': 3,
# Out channels is double in channels because predicts mean and variance
'out_channels': 6,
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': 'identity',
}
lowerCamelCase__ = UNetaDConditionModel(**UpperCAmelCase_ )
return model
@property
def _lowerCamelCase ( self ) -> str:
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def _lowerCamelCase ( self ) -> Optional[Any]:
torch.manual_seed(0 )
lowerCamelCase__ = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def _lowerCamelCase ( self ) -> Union[str, Any]:
# seeded differently to get different unet than `self.dummy_super_res_first`
torch.manual_seed(1 )
lowerCamelCase__ = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def _lowerCamelCase ( self ) -> List[Any]:
lowerCamelCase__ = self.dummy_decoder
lowerCamelCase__ = self.dummy_text_proj
lowerCamelCase__ = self.dummy_text_encoder
lowerCamelCase__ = self.dummy_tokenizer
lowerCamelCase__ = self.dummy_super_res_first
lowerCamelCase__ = self.dummy_super_res_last
lowerCamelCase__ = UnCLIPScheduler(
variance_type="learned_range" , prediction_type="epsilon" , num_train_timesteps=1000 , )
lowerCamelCase__ = UnCLIPScheduler(
variance_type="fixed_small_log" , prediction_type="epsilon" , num_train_timesteps=1000 , )
lowerCamelCase__ = CLIPImageProcessor(crop_size=32 , size=32 )
lowerCamelCase__ = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def _lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__=True ) -> List[Any]:
lowerCamelCase__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
if str(UpperCAmelCase_ ).startswith("mps" ):
lowerCamelCase__ = torch.manual_seed(UpperCAmelCase_ )
else:
lowerCamelCase__ = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
if pil_image:
lowerCamelCase__ = input_image * 0.5 + 0.5
lowerCamelCase__ = input_image.clamp(0 , 1 )
lowerCamelCase__ = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowerCamelCase__ = DiffusionPipeline.numpy_to_pil(UpperCAmelCase_ )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def _lowerCamelCase ( self ) -> Tuple:
lowerCamelCase__ = 'cpu'
lowerCamelCase__ = self.get_dummy_components()
lowerCamelCase__ = self.pipeline_class(**UpperCAmelCase_ )
lowerCamelCase__ = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCamelCase__ = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_ )
lowerCamelCase__ = pipe(**UpperCAmelCase_ )
lowerCamelCase__ = output.images
lowerCamelCase__ = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_ )
lowerCamelCase__ = pipe(
**UpperCAmelCase_ , return_dict=UpperCAmelCase_ , )[0]
lowerCamelCase__ = image[0, -3:, -3:, -1]
lowerCamelCase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase__ = np.array(
[
0.99_97,
0.00_02,
0.99_97,
0.99_97,
0.99_69,
0.00_23,
0.99_97,
0.99_69,
0.99_70,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _lowerCamelCase ( self ) -> List[Any]:
lowerCamelCase__ = 'cpu'
lowerCamelCase__ = self.get_dummy_components()
lowerCamelCase__ = self.pipeline_class(**UpperCAmelCase_ )
lowerCamelCase__ = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCamelCase__ = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_ )
lowerCamelCase__ = pipe(**UpperCAmelCase_ )
lowerCamelCase__ = output.images
lowerCamelCase__ = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_ )
lowerCamelCase__ = pipe(
**UpperCAmelCase_ , return_dict=UpperCAmelCase_ , )[0]
lowerCamelCase__ = image[0, -3:, -3:, -1]
lowerCamelCase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase__ = np.array([0.99_97, 0.00_03, 0.99_97, 0.99_97, 0.99_70, 0.00_24, 0.99_97, 0.99_71, 0.99_71] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _lowerCamelCase ( self ) -> Optional[Any]:
lowerCamelCase__ = 'cpu'
lowerCamelCase__ = self.get_dummy_components()
lowerCamelCase__ = self.pipeline_class(**UpperCAmelCase_ )
lowerCamelCase__ = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCamelCase__ = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_ )
lowerCamelCase__ = [
pipeline_inputs['image'],
pipeline_inputs['image'],
]
lowerCamelCase__ = pipe(**UpperCAmelCase_ )
lowerCamelCase__ = output.images
lowerCamelCase__ = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_ )
lowerCamelCase__ = [
tuple_pipeline_inputs['image'],
tuple_pipeline_inputs['image'],
]
lowerCamelCase__ = pipe(
**UpperCAmelCase_ , return_dict=UpperCAmelCase_ , )[0]
lowerCamelCase__ = image[0, -3:, -3:, -1]
lowerCamelCase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
lowerCamelCase__ = np.array(
[
0.99_97,
0.99_89,
0.00_08,
0.00_21,
0.99_60,
0.00_18,
0.00_14,
0.00_02,
0.99_33,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _lowerCamelCase ( self ) -> Union[str, Any]:
lowerCamelCase__ = torch.device("cpu" )
class _A :
__a = 1
lowerCamelCase__ = self.get_dummy_components()
lowerCamelCase__ = self.pipeline_class(**UpperCAmelCase_ )
lowerCamelCase__ = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCamelCase__ = torch.Generator(device=UpperCAmelCase_ ).manual_seed(0 )
lowerCamelCase__ = pipe.decoder.dtype
lowerCamelCase__ = 1
lowerCamelCase__ = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
lowerCamelCase__ = pipe.prepare_latents(
UpperCAmelCase_ , dtype=UpperCAmelCase_ , device=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , scheduler=DummyScheduler() )
lowerCamelCase__ = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
lowerCamelCase__ = pipe.prepare_latents(
UpperCAmelCase_ , dtype=UpperCAmelCase_ , device=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , scheduler=DummyScheduler() )
lowerCamelCase__ = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_ )
lowerCamelCase__ = pipe(
**UpperCAmelCase_ , decoder_latents=UpperCAmelCase_ , super_res_latents=UpperCAmelCase_ ).images
lowerCamelCase__ = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_ )
# Don't pass image, instead pass embedding
lowerCamelCase__ = pipeline_inputs.pop("image" )
lowerCamelCase__ = pipe.image_encoder(UpperCAmelCase_ ).image_embeds
lowerCamelCase__ = pipe(
**UpperCAmelCase_ , decoder_latents=UpperCAmelCase_ , super_res_latents=UpperCAmelCase_ , image_embeddings=UpperCAmelCase_ , ).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1e-4
@skip_mps
def _lowerCamelCase ( self ) -> List[str]:
lowerCamelCase__ = torch_device == 'cpu'
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
lowerCamelCase__ = 1e-2
self._test_attention_slicing_forward_pass(
test_max_difference=UpperCAmelCase_ , expected_max_diff=UpperCAmelCase_ )
@skip_mps
def _lowerCamelCase ( self ) -> Optional[int]:
lowerCamelCase__ = torch_device == 'cpu'
lowerCamelCase__ = True
lowerCamelCase__ = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
self._test_inference_batch_single_identical(
test_max_difference=UpperCAmelCase_ , relax_max_difference=UpperCAmelCase_ , additional_params_copy_to_batched_inputs=UpperCAmelCase_ , )
def _lowerCamelCase ( self ) -> Optional[Any]:
lowerCamelCase__ = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
lowerCamelCase__ = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=UpperCAmelCase_ , additional_params_copy_to_batched_inputs=UpperCAmelCase_ , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=UpperCAmelCase_ )
@skip_mps
def _lowerCamelCase ( self ) -> List[str]:
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _lowerCamelCase ( self ) -> Any:
return super().test_save_load_local()
@skip_mps
def _lowerCamelCase ( self ) -> Optional[Any]:
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self ) -> Optional[Any]:
lowerCamelCase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png" )
lowerCamelCase__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/unclip/karlo_v1_alpha_cat_variation_fp16.npy" )
lowerCamelCase__ = UnCLIPImageVariationPipeline.from_pretrained(
"kakaobrain/karlo-v1-alpha-image-variations" , torch_dtype=torch.floataa )
lowerCamelCase__ = pipeline.to(UpperCAmelCase_ )
pipeline.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCamelCase__ = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCamelCase__ = pipeline(
UpperCAmelCase_ , generator=UpperCAmelCase_ , output_type="np" , )
lowerCamelCase__ = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(UpperCAmelCase_ , UpperCAmelCase_ , 15 )
| 706 |
"""simple docstring"""
def UpperCAmelCase__ ( A__ ) -> list[int]:
"""simple docstring"""
if length <= 0 or not isinstance(A__ , A__ ):
raise ValueError("Length must be a positive integer." )
return [n * (2 * n - 1) for n in range(A__ )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 274 | 0 |
'''simple docstring'''
UpperCAmelCase__ : str = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
UpperCAmelCase__ : Any = ["a", "b", "c", "d", "e"]
def A ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Any ) -> Dict:
'''simple docstring'''
lowerCAmelCase__ = start
# add current to visited
visited.append(UpperCamelCase_ )
lowerCAmelCase__ = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
lowerCAmelCase__ = topological_sort(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# if all neighbors visited add current to sort
sort.append(UpperCamelCase_ )
# if all vertices haven't been visited select a new one to visit
if len(UpperCamelCase_ ) != len(UpperCamelCase_ ):
for vertice in vertices:
if vertice not in visited:
lowerCAmelCase__ = topological_sort(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# return sort
return sort
if __name__ == "__main__":
UpperCAmelCase__ : Dict = topological_sort("a", [], [])
print(sort)
| 48 |
import sys
def __magic_name__ ( __lowerCAmelCase : str ) -> Union[str, Any]:
__lowerCamelCase = len(__lowerCAmelCase )
__lowerCamelCase = [[0 for x in range(__lowerCAmelCase )] for x in range(__lowerCAmelCase )]
__lowerCamelCase = [[0 for x in range(__lowerCAmelCase )] for x in range(__lowerCAmelCase )]
for chain_length in range(2 , __lowerCAmelCase ):
for a in range(1 , n - chain_length + 1 ):
__lowerCamelCase = a + chain_length - 1
__lowerCamelCase = sys.maxsize
for c in range(__lowerCAmelCase , __lowerCAmelCase ):
__lowerCamelCase = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
__lowerCamelCase = cost
__lowerCamelCase = c
return matrix, sol
def __magic_name__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] ) -> List[str]:
if i == j:
print('''A''' + str(__lowerCAmelCase ) , end=''' ''' )
else:
print('''(''' , end=''' ''' )
print_optiomal_solution(__lowerCAmelCase , __lowerCAmelCase , optimal_solution[i][j] )
print_optiomal_solution(__lowerCAmelCase , optimal_solution[i][j] + 1 , __lowerCAmelCase )
print(''')''' , end=''' ''' )
def __magic_name__ ( ) -> Optional[Any]:
__lowerCamelCase = [30, 35, 15, 5, 10, 20, 25]
__lowerCamelCase = len(__lowerCAmelCase )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
__lowerCamelCase , __lowerCamelCase = matrix_chain_order(__lowerCAmelCase )
print('''No. of Operation required: ''' + str(matrix[1][n - 1] ) )
print_optiomal_solution(__lowerCAmelCase , 1 , n - 1 )
if __name__ == "__main__":
main()
| 298 | 0 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
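
# Tests for DPMSolverSDEScheduler, the stochastic (SDE) variant of the
# DPM-Solver samplers in diffusers. The scheduler draws its Brownian noise via
# the `torchsde` package, hence the `require_torchsde` guard on the class below.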
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10
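
    # SchedulerCommonTest provides the shared harness used below:
    # check_over_configs(...) re-instantiates each class in `scheduler_classes`
    # with the overridden config kwargs and checks that step outputs stay
    # consistent, while dummy_model() / dummy_sample_deter supply deterministic
    # fixtures for the full-loop tests.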
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config
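
    # A minimal sketch of how this scheduler is driven outside the test
    # harness (illustrative only; `unet`, `noise`, and `num_inference_steps`
    # are assumed stand-ins, not fixtures defined in this file):
    #
    #   scheduler = DPMSolverSDEScheduler(**self.get_scheduler_config())
    #   scheduler.set_timesteps(num_inference_steps)
    #   sample = noise * scheduler.init_noise_sigma
    #   for t in scheduler.timesteps:
    #       model_input = scheduler.scale_model_input(sample, t)
    #       model_output = unet(model_input, t)
    #       sample = scheduler.step(model_output, t, sample).prev_sample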
def UpperCAmelCase_ ( self :str ) -> str:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase )
def UpperCAmelCase_ ( self :Any ) -> Optional[int]:
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
self.check_over_configs(beta_start=lowerCamelCase , beta_end=lowerCamelCase )
def UpperCAmelCase_ ( self :Optional[int] ) -> List[Any]:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowerCamelCase )
def UpperCAmelCase_ ( self :str ) -> int:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase )
def UpperCAmelCase_ ( self :int ) -> Tuple:
UpperCAmelCase__ = self.scheduler_classes[0]
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
UpperCAmelCase__ = self.dummy_model()
UpperCAmelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase__ = sample.to(lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase__ = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = model(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = output.prev_sample
UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase ) )
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_67.47_82_10_44_92_18_75 ) < 1e-2
assert abs(result_mean.item() - 0.21_78_70_59_64_56_52_77 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_71.59_35_21_11_81_64_06 ) < 1e-2
assert abs(result_mean.item() - 0.2_23_42_90_68_92_29_96_52 ) < 1e-3
else:
assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62 ) < 1e-2
assert abs(result_mean.item() - 0.2_11_61_95_70_85_13_26 ) < 1e-3
def UpperCAmelCase_ ( self :List[str] ) -> Optional[Any]:
UpperCAmelCase__ = self.scheduler_classes[0]
UpperCAmelCase__ = self.get_scheduler_config(prediction_type="v_prediction" )
UpperCAmelCase__ = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
UpperCAmelCase__ = self.dummy_model()
UpperCAmelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase__ = sample.to(lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase__ = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = model(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = output.prev_sample
UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase ) )
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_24.77_14_92_00_43_94_53 ) < 1e-2
assert abs(result_mean.item() - 0.1_62_26_28_90_14_81_62_84 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_28.1_66_33_60_59_57_03 ) < 1e-2
assert abs(result_mean.item() - 0.1_66_88_32_60_01_16_72_97 ) < 1e-3
else:
assert abs(result_sum.item() - 1_19.8_48_75_48_82_81_25 ) < 1e-2
assert abs(result_mean.item() - 0.15_60_53_06_62_53_66_21 ) < 1e-3
def UpperCAmelCase_ ( self :List[str] ) -> List[str]:
UpperCAmelCase__ = self.scheduler_classes[0]
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase )
UpperCAmelCase__ = self.dummy_model()
UpperCAmelCase__ = self.dummy_sample_deter.to(lowerCamelCase ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
UpperCAmelCase__ = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = model(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = output.prev_sample
UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase ) )
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_67.46_95_73_97_46_09_38 ) < 1e-2
assert abs(result_mean.item() - 0.2_18_05_93_46_07_98_26_35 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_71.59_35_36_37_69_53_12 ) < 1e-2
assert abs(result_mean.item() - 0.2_23_42_90_83_82_41_57_71 ) < 1e-3
else:
assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62 ) < 1e-2
assert abs(result_mean.item() - 0.2_11_61_95_70_85_13_26 ) < 1e-3
def UpperCAmelCase_ ( self :Optional[int] ) -> Any:
UpperCAmelCase__ = self.scheduler_classes[0]
UpperCAmelCase__ = self.get_scheduler_config()
UpperCAmelCase__ = scheduler_class(**lowerCamelCase , use_karras_sigmas=lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase )
UpperCAmelCase__ = self.dummy_model()
UpperCAmelCase__ = self.dummy_sample_deter.to(lowerCamelCase ) * scheduler.init_noise_sigma
UpperCAmelCase__ = sample.to(lowerCamelCase )
for t in scheduler.timesteps:
UpperCAmelCase__ = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = model(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = output.prev_sample
UpperCAmelCase__ = torch.sum(torch.abs(lowerCamelCase ) )
UpperCAmelCase__ = torch.mean(torch.abs(lowerCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_76.66_97_41_35_74_21_88 ) < 1e-2
assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_77.63_65_35_64_45_31_25 ) < 1e-2
assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1e-2
else:
assert abs(result_sum.item() - 1_70.3_13_52_23_38_86_72 ) < 1e-2
assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1e-2
| 364 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _UpperCamelCase ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
UpperCAmelCase_ = IFInpaintingSuperResolutionPipeline
UpperCAmelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
UpperCAmelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""} )
UpperCAmelCase_ = PipelineTesterMixin.required_optional_params - {"""latents"""}
def UpperCAmelCase_ ( self :str ) -> Optional[Any]:
return self._get_superresolution_dummy_components()
def UpperCAmelCase_ ( self :List[str] , lowerCamelCase :Dict , lowerCamelCase :Optional[Any]=0 ) -> int:
if str(lowerCamelCase ).startswith("mps" ):
UpperCAmelCase__ = torch.manual_seed(lowerCamelCase )
else:
UpperCAmelCase__ = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
UpperCAmelCase__ = floats_tensor((1, 3, 16, 16) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
UpperCAmelCase__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
UpperCAmelCase__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
UpperCAmelCase__ = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def UpperCAmelCase_ ( self :Union[str, Any] ) -> Dict:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def UpperCAmelCase_ ( self :List[str] ) -> Dict:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def UpperCAmelCase_ ( self :Tuple ) -> Dict:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def UpperCAmelCase_ ( self :Dict ) -> str:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def UpperCAmelCase_ ( self :Dict ) -> str:
self._test_save_load_local()
def UpperCAmelCase_ ( self :List[str] ) -> List[str]:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 364 | 1 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Apply the logistic function 1 / (1 + e^(-x)) element-wise."""
    return 1 / (1 + np.exp(-vector))
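

# Quick sanity check (approximate values):
# sigmoid(np.array([-1.0, 0.0, 1.0])) -> array([0.26894142, 0.5, 0.73105858])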
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 31 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class A ( unittest.TestCase ):
def __init__( self: Optional[int] , _lowerCAmelCase: Tuple , _lowerCAmelCase: Optional[Any]=13 , _lowerCAmelCase: Optional[int]=7 , _lowerCAmelCase: Any=True , _lowerCAmelCase: List[Any]=True , _lowerCAmelCase: List[str]=True , _lowerCAmelCase: str=True , _lowerCAmelCase: Optional[int]=99 , _lowerCAmelCase: Any=32 , _lowerCAmelCase: Any=5 , _lowerCAmelCase: Tuple=4 , _lowerCAmelCase: Union[str, Any]=37 , _lowerCAmelCase: List[str]="gelu" , _lowerCAmelCase: Dict=0.1 , _lowerCAmelCase: Tuple=0.1 , _lowerCAmelCase: int=512 , _lowerCAmelCase: Tuple=16 , _lowerCAmelCase: Tuple=2 , _lowerCAmelCase: str=0.02 , _lowerCAmelCase: Optional[Any]=4 , ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =parent
UpperCAmelCase_ =batch_size
UpperCAmelCase_ =seq_length
UpperCAmelCase_ =is_training
UpperCAmelCase_ =use_attention_mask
UpperCAmelCase_ =use_token_type_ids
UpperCAmelCase_ =use_labels
UpperCAmelCase_ =vocab_size
UpperCAmelCase_ =hidden_size
UpperCAmelCase_ =num_hidden_layers
UpperCAmelCase_ =num_attention_heads
UpperCAmelCase_ =intermediate_size
UpperCAmelCase_ =hidden_act
UpperCAmelCase_ =hidden_dropout_prob
UpperCAmelCase_ =attention_probs_dropout_prob
UpperCAmelCase_ =max_position_embeddings
UpperCAmelCase_ =type_vocab_size
UpperCAmelCase_ =type_sequence_label_size
UpperCAmelCase_ =initializer_range
UpperCAmelCase_ =num_choices
def lowerCAmelCase__ ( self: Dict ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ =None
if self.use_attention_mask:
UpperCAmelCase_ =random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ =None
if self.use_token_type_ids:
UpperCAmelCase_ =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ =RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase__ ( self: str ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ =self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =config_and_inputs
UpperCAmelCase_ ={"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self: Tuple ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =config_and_inputs
UpperCAmelCase_ =True
UpperCAmelCase_ =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase_ =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class A ( __lowercase , unittest.TestCase ):
_snake_case =True
_snake_case =(
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase__ ( self: Dict ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =FlaxRobertaModelTester(self )
@slow
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
UpperCAmelCase_ =model_class_name.from_pretrained("roberta-base" , from_pt=_lowerCAmelCase )
UpperCAmelCase_ =model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowerCAmelCase )
| 54 | 0 |
def get_set_bits_count(number: int) -> int:
    """Count the set bits (1s) in the binary representation of `number`."""
    if not isinstance(number, int):
        raise TypeError("Input value must be a 'int' type")
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    return bin(number).count("1")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
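    # Example: 25 == 0b11001 has three set bits.
    print(get_set_bits_count(25))  # 3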
| 25 |
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(
        self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    """Plain breadth-first search over the grid defined above."""

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Return the valid, unblocked neighbours of `parent`."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Walk parent pointers back from `node` to rebuild the path."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node
                )
            # retarget each frontier at the other search's current node
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print("Bidirectional BFS computation time : ", bd_bfs_time)
| 25 | 1 |
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE__ = logging.getLogger()
def UpperCAmelCase__ ( lowerCamelCase_ : Path , lowerCamelCase_ : list ):
__a : Tuple = '\n'.join(lowerCamelCase_ )
Path(lowerCamelCase_ ).open('w' ).writelines(lowerCamelCase_ )
SCREAMING_SNAKE_CASE__ = '''patrickvonplaten/t5-tiny-random'''
SCREAMING_SNAKE_CASE__ = '''sshleifer/bart-tiny-random'''
SCREAMING_SNAKE_CASE__ = '''sshleifer/tiny-mbart'''
SCREAMING_SNAKE_CASE__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class _UpperCamelCase( __lowerCamelCase ):
def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
__a : Dict = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
__a : str = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
__a : Optional[Any] = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
_dump_articles(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__a : List[str] = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' )
__a : Any = 'translation_en_to_de' if model == T5_TINY else 'summarization'
__a : Union[str, Any] = f'''
run_eval_search.py
{model}
{input_file_name}
{output_file_name}
--score_path {score_path}
--task {task}
--num_beams 2
--length_penalty 2.0
'''.split()
with patch.object(SCREAMING_SNAKE_CASE__ , 'argv' , SCREAMING_SNAKE_CASE__ ):
run_generate()
assert Path(SCREAMING_SNAKE_CASE__ ).exists()
# os.remove(Path(output_file_name))
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
self.run_eval_tester(SCREAMING_SNAKE_CASE__ )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
self.run_eval_tester(SCREAMING_SNAKE_CASE__ )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
__a : str = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
__a : Any = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
__a : Dict = {
'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
'de': [
'Maschinelles Lernen ist großartig, oder?',
'Ich esse gerne Bananen',
'Morgen ist wieder ein toller Tag!',
],
}
__a : Dict = Path(self.get_auto_remove_tmp_dir() )
__a : Tuple = str(tmp_dir / 'scores.json' )
__a : List[str] = str(tmp_dir / 'val.target' )
_dump_articles(SCREAMING_SNAKE_CASE__ , text['en'] )
_dump_articles(SCREAMING_SNAKE_CASE__ , text['de'] )
__a : Dict = 'translation_en_to_de' if model == T5_TINY else 'summarization'
__a : Any = f'''
run_eval_search.py
{model}
{str(SCREAMING_SNAKE_CASE__ )}
{str(SCREAMING_SNAKE_CASE__ )}
--score_path {score_path}
--reference_path {reference_path}
--task {task}
'''.split()
testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] )
with patch.object(SCREAMING_SNAKE_CASE__ , 'argv' , SCREAMING_SNAKE_CASE__ ):
with CaptureStdout() as cs:
run_search()
__a : List[str] = [' num_beams | length_penalty', model, 'Best score args']
__a : Optional[Any] = ['Info']
if "translation" in task:
expected_strings.append('bleu' )
else:
expected_strings.extend(SCREAMING_SNAKE_CASE__ )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(SCREAMING_SNAKE_CASE__ ).exists()
os.remove(Path(SCREAMING_SNAKE_CASE__ ) )
| 47 |
def bfs(graph, s, t, parent) -> bool:
    """Return True if there is an augmenting path from source s to sink t."""
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def ford_fulkerson(graph, source, sink) -> int:
    # parent[] is filled by bfs() and stores the augmenting path
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # Update residual capacities along the path
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
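# Expected output for this network (the classic CLRS flow example): 23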
| 375 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
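# `_import_structure` maps each submodule to its public names; `_LazyModule`
# below consumes it so heavy optional dependencies (torch, vision) are only
# imported when the corresponding attribute is first accessed.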
_import_structure = {
"configuration_chinese_clip": [
"CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ChineseCLIPConfig",
"ChineseCLIPOnnxConfig",
"ChineseCLIPTextConfig",
"ChineseCLIPVisionConfig",
],
"processing_chinese_clip": ["ChineseCLIPProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
"CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ChineseCLIPModel",
"ChineseCLIPPreTrainedModel",
"ChineseCLIPTextModel",
"ChineseCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 710 |
import heapq
import sys
import numpy as np
_lowerCAmelCase : str = tuple[int, int]
class __snake_case :
def __init__( self ):
"""simple docstring"""
lowerCAmelCase__ = []
lowerCAmelCase__ = set()
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
if not self.empty():
return self.elements[0][0]
else:
return float('inf' )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
return len(self.elements ) == 0
def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_ ):
"""simple docstring"""
if item not in self.set:
heapq.heappush(self.elements ,(priority, item) )
self.set.add(a_ )
else:
# update
# print("update", item)
lowerCAmelCase__ = []
((lowerCAmelCase__) , (lowerCAmelCase__)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((lowerCAmelCase__) , (lowerCAmelCase__)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements ,(pro, xxx) )
def SCREAMING_SNAKE_CASE_ ( self ,a_ ):
"""simple docstring"""
if item in self.set:
self.set.remove(a_ )
lowerCAmelCase__ = []
((lowerCAmelCase__) , (lowerCAmelCase__)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((lowerCAmelCase__) , (lowerCAmelCase__)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements ,(prito, yyy) )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
return self.elements[0][1]
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
((lowerCAmelCase__) , (lowerCAmelCase__)) = heapq.heappop(self.elements )
self.set.remove(a_ )
return (priority, item)
def UpperCAmelCase_ ( snake_case__ , snake_case__ ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ = np.array(snake_case__ )
lowerCAmelCase__ = np.array(snake_case__ )
return np.linalg.norm(a - b )
def UpperCAmelCase_ ( snake_case__ , snake_case__ ) -> Any:
"""simple docstring"""
return consistent_heuristic(snake_case__ , snake_case__ ) // t
def UpperCAmelCase_ ( snake_case__ , snake_case__ ) -> str:
"""simple docstring"""
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def UpperCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ = g_function[start] + Wa * heuristics[i](snake_case__ , snake_case__ )
return ans
def UpperCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ) -> Any:
"""simple docstring"""
lowerCAmelCase__ = np.chararray((n, n) )
for i in range(snake_case__ ):
for j in range(snake_case__ ):
lowerCAmelCase__ = '*'
for i in range(snake_case__ ):
for j in range(snake_case__ ):
if (j, (n - 1) - i) in blocks:
lowerCAmelCase__ = '#'
lowerCAmelCase__ = '-'
lowerCAmelCase__ = back_pointer[goal]
while x != start:
((lowerCAmelCase__) , (lowerCAmelCase__)) = x
# print(x)
lowerCAmelCase__ = '-'
lowerCAmelCase__ = back_pointer[x]
lowerCAmelCase__ = '-'
for i in range(snake_case__ ):
for j in range(snake_case__ ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=' ' )
print('<-- End position' , end=' ' )
else:
print(grid[i][j] , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
print('PATH TAKEN BY THE ALGORITHM IS:-' )
lowerCAmelCase__ = back_pointer[goal]
while x != start:
print(snake_case__ , end=' ' )
lowerCAmelCase__ = back_pointer[x]
print(snake_case__ )
sys.exit()
def UpperCAmelCase_ ( snake_case__ ) -> Union[str, Any]:
"""simple docstring"""
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def UpperCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) -> Union[str, Any]:
"""simple docstring"""
for itera in range(snake_case__ ):
open_list[itera].remove_element(snake_case__ )
# print("s", s)
# print("j", j)
((lowerCAmelCase__) , (lowerCAmelCase__)) = s
lowerCAmelCase__ = (x - 1, y)
lowerCAmelCase__ = (x + 1, y)
lowerCAmelCase__ = (x, y + 1)
lowerCAmelCase__ = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(snake_case__ ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(snake_case__ )
lowerCAmelCase__ = -1
lowerCAmelCase__ = float('inf' )
if valid(snake_case__ ) and g_function[neighbours] > g_function[s] + 1:
lowerCAmelCase__ = g_function[s] + 1
lowerCAmelCase__ = s
if neighbours not in close_list_anchor:
open_list[0].put(snake_case__ , key(snake_case__ , 0 , snake_case__ , snake_case__ ) )
if neighbours not in close_list_inad:
for var in range(1 , snake_case__ ):
if key(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) <= Wa * key(
snake_case__ , 0 , snake_case__ , snake_case__ ):
open_list[j].put(
snake_case__ , key(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) )
def UpperCAmelCase_ ( ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
_lowerCAmelCase : Tuple = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
_lowerCAmelCase : Optional[Any] = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(1_0, 1),
(1_1, 1),
(1_2, 1),
(1_3, 1),
(1_4, 1),
(1_5, 1),
(1_6, 1),
(1_7, 1),
(1_8, 1),
(1_9, 1),
]
_lowerCAmelCase : Any = make_common_ground()
_lowerCAmelCase : List[str] = blocks_blk
# hyper parameters
_lowerCAmelCase : Optional[Any] = 1
_lowerCAmelCase : Union[str, Any] = 1
_lowerCAmelCase : int = 2_0
_lowerCAmelCase : str = 3 # one consistent and two other inconsistent
# start and end destination
_lowerCAmelCase : Tuple = (0, 0)
_lowerCAmelCase : List[str] = (n - 1, n - 1)
_lowerCAmelCase : str = 1
def UpperCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ) -> str:
"""simple docstring"""
lowerCAmelCase__ = {start: 0, goal: float('inf' )}
lowerCAmelCase__ = {start: -1, goal: -1}
lowerCAmelCase__ = []
lowerCAmelCase__ = set()
for i in range(snake_case__ ):
open_list.append(PriorityQueue() )
open_list[i].put(snake_case__ , key(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) )
lowerCAmelCase__ = []
lowerCAmelCase__ = []
while open_list[0].minkey() < float('inf' ):
for i in range(1 , snake_case__ ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('inf' ):
do_something(snake_case__ , snake_case__ , snake_case__ )
else:
lowerCAmelCase__ , lowerCAmelCase__ = open_list[i].top_show()
visited.add(snake_case__ )
expand_state(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , )
close_list_inad.append(snake_case__ )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('inf' ):
do_something(snake_case__ , snake_case__ , snake_case__ )
else:
lowerCAmelCase__ = open_list[0].top_show()
visited.add(snake_case__ )
expand_state(
snake_case__ , 0 , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , )
close_list_anchor.append(snake_case__ )
print('No path found to goal' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(snake_case__ ):
if (j, i) in blocks:
print('#' , end=' ' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('*' , end=' ' )
else:
print('-' , end=' ' )
else:
print('*' , end=' ' )
if (j, i) == (n - 1, n - 1):
print('<-- End position' , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 604 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class A__ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
UpperCamelCase = 1
UpperCamelCase = 3
UpperCamelCase = (32, 32)
UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_SCREAMING_SNAKE_CASE )
return image
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def _SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(_SCREAMING_SNAKE_CASE )
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
def extract(*_SCREAMING_SNAKE_CASE : Optional[Any] , **_SCREAMING_SNAKE_CASE : Union[str, Any] ):
class A__ :
'''simple docstring'''
def __init__( self : int ):
"""simple docstring"""
UpperCamelCase = torch.ones([0] )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
self.pixel_values.to(_SCREAMING_SNAKE_CASE )
return self
return Out()
return extract
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.dummy_cond_unet
UpperCamelCase = PNDMScheduler(skip_prk_steps=_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.dummy_vae
UpperCamelCase = self.dummy_text_encoder
UpperCamelCase = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
UpperCamelCase = 77
UpperCamelCase = self.dummy_image.to(_SCREAMING_SNAKE_CASE )
UpperCamelCase = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
UpperCamelCase = AltDiffusionImgaImgPipeline(
unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , vae=_SCREAMING_SNAKE_CASE , text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=self.dummy_extractor , )
UpperCamelCase = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_SCREAMING_SNAKE_CASE )
UpperCamelCase = alt_pipe.to(_SCREAMING_SNAKE_CASE )
alt_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
UpperCamelCase = 'A painting of a squirrel eating a burger'
UpperCamelCase = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
UpperCamelCase = alt_pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=_SCREAMING_SNAKE_CASE , )
UpperCamelCase = output.images
UpperCamelCase = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
UpperCamelCase = alt_pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , )[0]
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCamelCase = np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.dummy_cond_unet
UpperCamelCase = PNDMScheduler(skip_prk_steps=_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.dummy_vae
UpperCamelCase = self.dummy_text_encoder
UpperCamelCase = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
UpperCamelCase = 77
UpperCamelCase = self.dummy_image.to(_SCREAMING_SNAKE_CASE )
# put models in fp16
UpperCamelCase = unet.half()
UpperCamelCase = vae.half()
UpperCamelCase = bert.half()
# make sure here that pndm scheduler skips prk
UpperCamelCase = AltDiffusionImgaImgPipeline(
unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , vae=_SCREAMING_SNAKE_CASE , text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=self.dummy_extractor , )
UpperCamelCase = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_SCREAMING_SNAKE_CASE )
UpperCamelCase = alt_pipe.to(_SCREAMING_SNAKE_CASE )
alt_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
UpperCamelCase = 'A painting of a squirrel eating a burger'
UpperCamelCase = torch.manual_seed(0 )
UpperCamelCase = alt_pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type='np' , image=_SCREAMING_SNAKE_CASE , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
# resize to resolution that is divisible by 8 but not 16 or 32
UpperCamelCase = init_image.resize((760, 504) )
UpperCamelCase = 'BAAI/AltDiffusion'
UpperCamelCase = AltDiffusionImgaImgPipeline.from_pretrained(
_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing()
UpperCamelCase = 'A fantasy landscape, trending on artstation'
UpperCamelCase = torch.manual_seed(0 )
UpperCamelCase = pipe(
prompt=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , strength=0.7_5 , guidance_scale=7.5 , generator=_SCREAMING_SNAKE_CASE , output_type='np' , )
UpperCamelCase = output.images[0]
UpperCamelCase = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
UpperCamelCase = np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
UpperCamelCase = init_image.resize((768, 512) )
UpperCamelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy' )
UpperCamelCase = 'BAAI/AltDiffusion'
UpperCamelCase = AltDiffusionImgaImgPipeline.from_pretrained(
_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing()
UpperCamelCase = 'A fantasy landscape, trending on artstation'
UpperCamelCase = torch.manual_seed(0 )
UpperCamelCase = pipe(
prompt=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , strength=0.7_5 , guidance_scale=7.5 , generator=_SCREAMING_SNAKE_CASE , output_type='np' , )
UpperCamelCase = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 280 |
def different_signs(num1: int, num2: int) -> bool:
    """Return True iff num1 and num2 have opposite signs (sign bit of num1 ^ num2)."""
    return num1 ^ num2 < 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
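    # Works for Python ints because they behave as infinite two's complement:
    print(different_signs(1, -1))  # True
    print(different_signs(1, 1))   # False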
| 280 | 1 |
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
'compression_format, is_archive' , [
('7z', True),
('bz2', False),
('gzip', False),
('lz4', False),
('tar', True),
('xz', False),
('zip', True),
('zstd', False),
] , )
def _lowerCamelCase ( snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
_lowerCAmelCase = {
'7z': (seven_zip_file, SevenZipExtractor),
'bz2': (bza_file, BzipaExtractor),
'gzip': (gz_file, GzipExtractor),
'lz4': (lza_file, LzaExtractor),
'tar': (tar_file, TarExtractor),
'xz': (xz_file, XzExtractor),
'zip': (zip_file, ZipExtractor),
'zstd': (zstd_file, ZstdExtractor),
}
_lowerCAmelCase , _lowerCAmelCase = input_paths_and_base_extractors[compression_format]
if input_path is None:
_lowerCAmelCase = F'for \'{compression_format}\' compression_format, '
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(lowerCamelCase_ )
assert base_extractor.is_extractable(lowerCamelCase_ )
_lowerCAmelCase = tmp_path / ('extracted' if is_archive else 'extracted.txt')
base_extractor.extract(lowerCamelCase_ , lowerCamelCase_ )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
_lowerCAmelCase = file_path.read_text(encoding='utf-8' )
else:
_lowerCAmelCase = output_path.read_text(encoding='utf-8' )
_lowerCAmelCase = text_file.read_text(encoding='utf-8' )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'compression_format, is_archive' , [
('7z', True),
('bz2', False),
('gzip', False),
('lz4', False),
('tar', True),
('xz', False),
('zip', True),
('zstd', False),
] , )
def _lowerCamelCase ( snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
_lowerCAmelCase = {
'7z': seven_zip_file,
'bz2': bza_file,
'gzip': gz_file,
'lz4': lza_file,
'tar': tar_file,
'xz': xz_file,
'zip': zip_file,
'zstd': zstd_file,
}
_lowerCAmelCase = input_paths[compression_format]
if input_path is None:
_lowerCAmelCase = F'for \'{compression_format}\' compression_format, '
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(lowerCamelCase_ )
_lowerCAmelCase = Extractor.infer_extractor_format(lowerCamelCase_ )
assert extractor_format is not None
_lowerCAmelCase = tmp_path / ('extracted' if is_archive else 'extracted.txt')
Extractor.extract(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
_lowerCAmelCase = file_path.read_text(encoding='utf-8' )
else:
_lowerCAmelCase = output_path.read_text(encoding='utf-8' )
_lowerCAmelCase = text_file.read_text(encoding='utf-8' )
assert extracted_file_content == expected_file_content
@pytest.fixture
def _lowerCamelCase ( snake_case , snake_case ):
import tarfile
_lowerCAmelCase = tmp_path / 'data_dot_dot'
directory.mkdir()
_lowerCAmelCase = directory / 'tar_file_with_dot_dot.tar'
with tarfile.TarFile(lowerCamelCase_ , 'w' ) as f:
f.add(lowerCamelCase_ , arcname=os.path.join('..' , text_file.name ) )
return path
@pytest.fixture
def _lowerCamelCase ( snake_case ):
import tarfile
_lowerCAmelCase = tmp_path / 'data_sym_link'
directory.mkdir()
_lowerCAmelCase = directory / 'tar_file_with_sym_link.tar'
os.symlink('..' , directory / 'subdir' , target_is_directory=lowerCamelCase_ )
with tarfile.TarFile(lowerCamelCase_ , 'w' ) as f:
f.add(str(directory / 'subdir' ) , arcname='subdir' ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
'insecure_tar_file, error_log' , [('tar_file_with_dot_dot', 'illegal path'), ('tar_file_with_sym_link', 'Symlink')] , )
def _lowerCamelCase ( snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
_lowerCAmelCase = {
'tar_file_with_dot_dot': tar_file_with_dot_dot,
'tar_file_with_sym_link': tar_file_with_sym_link,
}
_lowerCAmelCase = insecure_tar_files[insecure_tar_file]
_lowerCAmelCase = tmp_path / 'extracted'
TarExtractor.extract(lowerCamelCase_ , lowerCamelCase_ )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def _lowerCamelCase ( snake_case ):
# We should have less false positives than zipfile.is_zipfile
# We do that by checking only the magic number
_lowerCAmelCase = tmpdir / 'not_a_zip_file'
# From: https://github.com/python/cpython/pull/5053
_lowerCAmelCase = (
b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00'
b'\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I'
b'DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07'
b'\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82'
)
with not_a_zip_file.open('wb' ) as f:
f.write(lowerCamelCase_ )
assert zipfile.is_zipfile(str(lowerCamelCase_ ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(lowerCamelCase_ ) # but we're right
| 712 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 225 | 0 |
Pointad = tuple[float, float, float]
Vectorad = tuple[float, float, float]
def create_vector(end_pointa: Pointad, end_pointb: Pointad) -> Vectorad:
    x = end_pointb[0] - end_pointa[0]
    y = end_pointb[1] - end_pointa[1]
    z = end_pointb[2] - end_pointa[2]
    return (x, y, z)


def get_ad_vectors_cross(ab: Vectorad, ac: Vectorad) -> Vectorad:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vectorad, accuracy: int) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)
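

# Quick sanity checks (comments only; this module has no __main__ guard):
# create_vector((0, 0, 0), (1, 2, 3))        -> (1, 2, 3)
# get_ad_vectors_cross((1, 0, 0), (0, 1, 0)) -> (0, 0, 1)
# are_coplanar (below) returns True when the cross product rounds to zero.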
def are_coplanar(pointa: Pointad, pointb: Pointad, pointc: Pointad, accuracy: int = 10) -> bool:
    ab = create_vector(pointa, pointb)
    ac = create_vector(pointa, pointc)
    return is_zero_vector(get_ad_vectors_cross(ab, ac), accuracy)
| 31 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowercase : Tuple = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
lowercase : List[str] = """ \"\"\"
Output class for the scheduler's step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"\"\"
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
"""
class __snake_case ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : int = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir ,"""schedulers/""" ) )
lowercase : Any = self.diffusers_dir
shutil.copy(
os.path.join(snake_case ,"""src/diffusers/schedulers/scheduling_ddpm.py""" ) ,os.path.join(self.diffusers_dir ,"""schedulers/scheduling_ddpm.py""" ) ,)
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Any = """src/diffusers"""
shutil.rmtree(self.diffusers_dir )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ,snake_case=None ):
'''simple docstring'''
lowercase : Optional[int] = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
if overwrite_result is not None:
lowercase : Optional[Any] = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
lowercase : List[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} ,line_length=119 )
lowercase : int = black.format_str(snake_case ,mode=snake_case )
lowercase : int = os.path.join(self.diffusers_dir ,"""new_code.py""" )
with open(snake_case ,"""w""" ,newline="""\n""" ) as f:
f.write(snake_case )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(snake_case ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name ,overwrite=snake_case )
with open(snake_case ,"""r""" ) as f:
self.assertTrue(f.read() ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[Any] = check_copies.find_code_in_diffusers("""schedulers.scheduling_ddpm.DDPMSchedulerOutput""" )
self.assertEqual(snake_case ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" ,"""DDPMSchedulerOutput""" ,REFERENCE_CODE + """\n""" ,)
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" ,"""DDPMSchedulerOutput""" ,snake_case ,)
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" ,"""TestSchedulerOutput""" ,re.sub("""DDPM""" ,"""Test""" ,snake_case ) ,)
# Copy consistency with a really long name
lowercase : Dict = """TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}" ,f"{long_class_name}SchedulerOutput" ,re.sub("""Bert""" ,snake_case ,snake_case ) ,)
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" ,"""TestSchedulerOutput""" ,snake_case ,overwrite_result=re.sub("""DDPM""" ,"""Test""" ,snake_case ) ,)
| 336 | 0 |
import functools
def min_distance_up_bottom(worda: str, wordb: str) -> int:
    """Top-down (memoized) edit distance between `worda` and `wordb`."""
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # if the first word's index overflows - delete the rest of the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if the second word's index overflows - delete the rest of the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # current letters not identical
        return min(
            1 + min_distance(indexa + 1, indexb),
            1 + min_distance(indexa, indexb + 1),
            diff + min_distance(indexa + 1, indexb + 1),
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
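    # Classic example: "intention" -> "execution" needs 5 single-character edits.
    print(min_distance_up_bottom("intention", "execution"))  # 5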
| 715 |
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self ):
"""simple docstring"""
snake_case = ['a', 'b', 'c']
# Defaults to last layer if both are None
snake_case ,snake_case = get_aligned_output_features_output_indices(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
self.assertEqual(lowerCAmelCase , ['c'] )
self.assertEqual(lowerCAmelCase , [2] )
# Out indices set to match out features
snake_case ,snake_case = get_aligned_output_features_output_indices(['a', 'c'] , lowerCAmelCase , lowerCAmelCase )
self.assertEqual(lowerCAmelCase , ['a', 'c'] )
self.assertEqual(lowerCAmelCase , [0, 2] )
# Out features set to match out indices
snake_case ,snake_case = get_aligned_output_features_output_indices(lowerCAmelCase , [0, 2] , lowerCAmelCase )
self.assertEqual(lowerCAmelCase , ['a', 'c'] )
self.assertEqual(lowerCAmelCase , [0, 2] )
# Out features selected from negative indices
snake_case ,snake_case = get_aligned_output_features_output_indices(lowerCAmelCase , [-3, -1] , lowerCAmelCase )
self.assertEqual(lowerCAmelCase , ['a', 'c'] )
self.assertEqual(lowerCAmelCase , [-3, -1] )
def snake_case ( self ):
"""simple docstring"""
with self.assertRaises(lowerCAmelCase ):
verify_out_features_out_indices(['a', 'b'] , (0, 1) , lowerCAmelCase )
# Out features must be a list
with self.assertRaises(lowerCAmelCase ):
verify_out_features_out_indices(('a', 'b') , (0, 1) , ['a', 'b'] )
# Out features must be a subset of stage names
with self.assertRaises(lowerCAmelCase ):
verify_out_features_out_indices(['a', 'b'] , (0, 1) , ['a'] )
# Out indices must be a list or tuple
with self.assertRaises(lowerCAmelCase ):
verify_out_features_out_indices(lowerCAmelCase , 0 , ['a', 'b'] )
# Out indices must be a subset of stage names
with self.assertRaises(lowerCAmelCase ):
verify_out_features_out_indices(lowerCAmelCase , (0, 1) , ['a'] )
# Out features and out indices must be the same length
with self.assertRaises(lowerCAmelCase ):
verify_out_features_out_indices(['a', 'b'] , (0,) , ['a', 'b', 'c'] )
# Out features should match out indices
with self.assertRaises(lowerCAmelCase ):
verify_out_features_out_indices(['a', 'b'] , (0, 2) , ['a', 'b', 'c'] )
# Out features and out indices should be in order
with self.assertRaises(lowerCAmelCase ):
verify_out_features_out_indices(['b', 'a'] , (0, 1) , ['a', 'b'] )
# Check passes with valid inputs
verify_out_features_out_indices(['a', 'b', 'd'] , (0, 1, -1) , ['a', 'b', 'c', 'd'] )
def snake_case ( self ):
"""simple docstring"""
snake_case = BackboneMixin()
snake_case = ['a', 'b', 'c']
snake_case = ['a', 'c']
snake_case = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ['a', 'c'] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
snake_case = ['a', 'b']
self.assertEqual(backbone.out_features , ['a', 'b'] )
self.assertEqual(backbone.out_indices , [0, 1] )
snake_case = [-3, -1]
self.assertEqual(backbone.out_features , ['a', 'c'] )
self.assertEqual(backbone.out_indices , [-3, -1] )
| 104 | 0 |
import torch
from transformers import AutoModel
class FSNERModel( torch.nn.Module ):
    def __init__( self , pretrained_model_name_or_path: str = "sayef/fsner-bert-base-uncased" ):
        super(FSNERModel , self ).__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path , return_dict=True )
        self.cos = torch.nn.CosineSimilarity(3 , 1e-08 )
        self.softmax = torch.nn.Softmax(dim=1 )

    def BERT( self , **inputs ):
        return self.bert(**inputs ).last_hidden_state

    def VectorSum( self , token_embeddings ):
        return token_embeddings.sum(2 , keepdim=True )

    def Atten( self , q_rep , S_rep , T=1 ):
        return self.softmax(T * self.cos(q_rep , S_rep ) )

    def forward( self , W_query , W_supports ):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query )
        S = self.BERT(**W_supports )
        p_starts = None
        p_ends = None
        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id
        for i, size in enumerate(support_sizes ):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
            p_end = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start) )
                p_ends = torch.vstack((p_ends, p_end) )
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
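# Usage sketch (illustrative; the exact preprocessing lives in companion FSNER
# tokenizer utilities that are assumptions here, not shown above). `forward`
# expects two tokenizer-output dicts: W_query for the query sentences and
# W_supports for the support examples, where W_supports additionally carries
# "sizes" (number of supports per query) and the "start_token_id" /
# "end_token_id" of the entity markers. It returns one start-probability and
# one end-probability distribution per query token.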
| 382 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE : List[str] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : str = ['''NllbTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = ['''NllbTokenizerFast''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
_SCREAMING_SNAKE_CASE : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 549 | 0 |
import operator as op
def solve( post_fix ) -> int:
    stack = []
    div = lambda x , y : int(x / y )  # noqa: E731 integer division operation
    opr = {
        '''^''': op.pow,
        '''*''': op.mul,
        '''/''': div,
        '''+''': op.add,
        '''-''': op.sub,
    }  # operators & their respective operation

    # print table header
    print('''Symbol'''.center(8 ) , '''Action'''.center(12 ) , '''Stack''' , sep=''' | ''' )
    print('''-''' * (30 + len(post_fix )) )

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x )  # append x to stack
            # output in tabular format
            print(x.rjust(8 ) , ('''push(''' + x + ''')''').ljust(12 ) , ''','''.join(stack ) , sep=''' | ''' )
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print(''''''.rjust(8 ) , ('''pop(''' + b + ''')''').ljust(12 ) , ''','''.join(stack ) , sep=''' | ''' )
            a = stack.pop()  # pop stack
            # output in tabular format
            print(''''''.rjust(8 ) , ('''pop(''' + a + ''')''').ljust(12 ) , ''','''.join(stack ) , sep=''' | ''' )
            stack.append(
                str(opr[x](int(a ) , int(b ) ) ) )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8 ) , ('''push(''' + a + x + b + ''')''').ljust(12 ) , ''','''.join(stack ) , sep=''' | ''' , )
    return int(stack[0] )
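# Worked example (illustrative): the postfix expression "2 3 + 4 *" evaluates
# as (2 + 3) * 4, so
#   solve(["2", "3", "+", "4", "*"]) == 20
# with the intermediate stack states printed as a table along the way.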
if __name__ == "__main__":
    Postfix = input('''\n\nEnter a Postfix Equation (space separated) = ''').split(''' ''')
    print('''\n\tResult = ''' , solve(Postfix ) ) | 704 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
__UpperCAmelCase = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def _snake_case ( A , A , A , A , A ) -> Optional[Any]:
for attribute in key.split('''.''' ):
lowerCAmelCase__ = getattr(A , A )
if weight_type is not None:
lowerCAmelCase__ = getattr(A , A ).shape
else:
lowerCAmelCase__ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
lowerCAmelCase__ = value
elif weight_type == "weight_g":
lowerCAmelCase__ = value
elif weight_type == "weight_v":
lowerCAmelCase__ = value
elif weight_type == "bias":
lowerCAmelCase__ = value
elif weight_type == "running_mean":
lowerCAmelCase__ = value
elif weight_type == "running_var":
lowerCAmelCase__ = value
elif weight_type == "num_batches_tracked":
lowerCAmelCase__ = value
elif weight_type == "inv_freq":
lowerCAmelCase__ = value
else:
lowerCAmelCase__ = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def _snake_case ( A , A , A ) -> Any:
lowerCAmelCase__ = []
lowerCAmelCase__ = fairseq_model.state_dict()
lowerCAmelCase__ = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
lowerCAmelCase__ = False
if "conv_layers" in name:
load_conv_layer(
A , A , A , A , hf_model.config.feat_extract_norm == '''group''' , )
lowerCAmelCase__ = True
else:
for key, mapped_key in MAPPING.items():
lowerCAmelCase__ = '''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
lowerCAmelCase__ = True
if "*" in mapped_key:
lowerCAmelCase__ = name.split(A )[0].split('''.''' )[-2]
lowerCAmelCase__ = mapped_key.replace('''*''' , A )
if "pos_bias_u" in name:
lowerCAmelCase__ = None
elif "pos_bias_v" in name:
lowerCAmelCase__ = None
elif "weight_g" in name:
lowerCAmelCase__ = '''weight_g'''
elif "weight_v" in name:
lowerCAmelCase__ = '''weight_v'''
elif "bias" in name:
lowerCAmelCase__ = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowerCAmelCase__ = '''weight'''
elif "running_mean" in name:
lowerCAmelCase__ = '''running_mean'''
elif "inv_freq" in name:
lowerCAmelCase__ = '''inv_freq'''
elif "running_var" in name:
lowerCAmelCase__ = '''running_var'''
elif "num_batches_tracked" in name:
lowerCAmelCase__ = '''num_batches_tracked'''
else:
lowerCAmelCase__ = None
set_recursively(A , A , A , A , A )
continue
if not is_used:
unused_weights.append(A )
logger.warning(F"""Unused weights: {unused_weights}""" )
def _snake_case ( A , A , A , A , A ) -> Tuple:
lowerCAmelCase__ = full_name.split('''conv_layers.''' )[-1]
lowerCAmelCase__ = name.split('''.''' )
lowerCAmelCase__ = int(items[0] )
lowerCAmelCase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
lowerCAmelCase__ = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
lowerCAmelCase__ = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
lowerCAmelCase__ = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
lowerCAmelCase__ = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(A )
@torch.no_grad()
def _snake_case ( A , A , A=None , A=None , A=True ) -> Optional[int]:
if config_path is not None:
lowerCAmelCase__ = WavaVecaConformerConfig.from_pretrained(A , hidden_act='''swish''' )
else:
lowerCAmelCase__ = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
lowerCAmelCase__ = '''rotary'''
if is_finetuned:
if dict_path:
lowerCAmelCase__ = Dictionary.load(A )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowerCAmelCase__ = target_dict.pad_index
lowerCAmelCase__ = target_dict.bos_index
lowerCAmelCase__ = target_dict.eos_index
lowerCAmelCase__ = len(target_dict.symbols )
lowerCAmelCase__ = os.path.join(A , '''vocab.json''' )
if not os.path.isdir(A ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(A ) )
return
os.makedirs(A , exist_ok=A )
lowerCAmelCase__ = target_dict.indices
# fairseq has the <pad> and <s> switched
lowerCAmelCase__ = 0
lowerCAmelCase__ = 1
with open(A , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(A , A )
lowerCAmelCase__ = WavaVecaCTCTokenizer(
A , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=A , )
lowerCAmelCase__ = True if config.feat_extract_norm == '''layer''' else False
lowerCAmelCase__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=A , return_attention_mask=A , )
lowerCAmelCase__ = WavaVecaProcessor(feature_extractor=A , tokenizer=A )
processor.save_pretrained(A )
lowerCAmelCase__ = WavaVecaConformerForCTC(A )
else:
lowerCAmelCase__ = WavaVecaConformerForPreTraining(A )
if is_finetuned:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
lowerCAmelCase__ = argparse.Namespace(task='''audio_pretraining''' )
lowerCAmelCase__ = fairseq.tasks.setup_task(A )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=A )
lowerCAmelCase__ = model[0].eval()
recursively_load_weights(A , A , not is_finetuned )
hf_wavavec.save_pretrained(A )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
__UpperCAmelCase = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
) | 98 | 0 |
'''simple docstring'''
from sklearn.metrics import recall_score
import datasets
__UpperCamelCase = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"
__UpperCamelCase = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n    - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n    - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n    - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n    - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n    - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n    - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n    - `0`: If there is a zero division, the return value is `0`.\n    - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n    Example 1-A simple example with some errors\n        >>> recall_metric = datasets.load_metric('recall')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n        >>> print(results)\n        {'recall': 0.6666666666666666}\n\n    Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n        >>> recall_metric = datasets.load_metric('recall')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n        >>> print(results)\n        {'recall': 0.5}\n\n    Example 3-The same example as Example 1, but with `sample_weight` included.\n        >>> recall_metric = datasets.load_metric('recall')\n        >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n        >>> print(results)\n        {'recall': 0.55}\n\n    Example 4-A multiclass example, using different averages.\n        >>> recall_metric = datasets.load_metric('recall')\n        >>> predictions = [0, 2, 1, 0, 0, 1]\n        >>> references = [0, 1, 2, 0, 1, 2]\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n        >>> print(results)\n        {'recall': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n        >>> print(results)\n        {'recall': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n        >>> print(results)\n        {'recall': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n        >>> print(results)\n        {'recall': array([1., 0., 0.])}\n"
__UpperCamelCase = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
def lowercase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
def lowercase__ ( self : Tuple , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Any=None , __magic_name__ : Optional[Any]=1 , __magic_name__ : List[str]="binary" , __magic_name__ : Tuple=None , __magic_name__ : Dict="warn" , ) -> Any:
"""simple docstring"""
__snake_case : Tuple = recall_score(
__magic_name__ , __magic_name__ , labels=__magic_name__ , pos_label=__magic_name__ , average=__magic_name__ , sample_weight=__magic_name__ , zero_division=__magic_name__ , )
return {"recall": float(__magic_name__ ) if score.size == 1 else score}
| 26 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class MobileViTFeatureExtractor( MobileViTImageProcessor ):
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 163 | 0 |
import argparse
import struct
import unittest
class SHA256:
    """simple docstring"""

    def __init__( self , data ):
        '''simple docstring'''
        self.data = data
        # Initialize hash values
        self.hashes = [
0x6A09E667,
0xBB67AE85,
0x3C6EF372,
0xA54FF53A,
0x510E527F,
0x9B05688C,
0x1F83D9AB,
0x5BE0CD19,
]
        # Initialize round constants
        self.round_constants = [
0x428A2F98,
0x71374491,
0xB5C0FBCF,
0xE9B5DBA5,
0x3956C25B,
0x59F111F1,
0x923F82A4,
0xAB1C5ED5,
0xD807AA98,
0x12835B01,
0x243185BE,
0x550C7DC3,
0x72BE5D74,
0x80DEB1FE,
0x9BDC06A7,
0xC19BF174,
0xE49B69C1,
0xEFBE4786,
0x0FC19DC6,
0x240CA1CC,
0x2DE92C6F,
0x4A7484AA,
0x5CB0A9DC,
0x76F988DA,
0x983E5152,
0xA831C66D,
0xB00327C8,
0xBF597FC7,
0xC6E00BF3,
0xD5A79147,
0x06CA6351,
0x14292967,
0x27B70A85,
0x2E1B2138,
0x4D2C6DFC,
0x53380D13,
0x650A7354,
0x766A0ABB,
0x81C2C92E,
0x92722C85,
0xA2BFE8A1,
0xA81A664B,
0xC24B8B70,
0xC76C51A3,
0xD192E819,
0xD6990624,
0xF40E3585,
0x106AA070,
0x19A4C116,
0x1E376C08,
0x2748774C,
0x34B0BCB5,
0x391C0CB3,
0x4ED8AA4A,
0x5B9CCA4F,
0x682E6FF3,
0x748F82EE,
0x78A5636F,
0x84C87814,
0x8CC70208,
0x90BEFFFA,
0xA4506CEB,
0xBEF9A3F7,
0xC67178F2,
]
        self.preprocessed_data = self.preprocessing(self.data )
        self.final_hash()
    @staticmethod
    def preprocessing( data ):
        '''simple docstring'''
        padding = b"\x80" + (b"\x00" * (63 - (len(data ) + 8) % 64))
        big_endian_integer = struct.pack(">Q" , (len(data ) * 8) )
        return data + padding + big_endian_integer
    def final_hash( self ):
        '''simple docstring'''
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0 , len(self.preprocessed_data ) , 64 )
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L" , block ) )
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0 , 64 ):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15] , 7 )
                        ^ self.ror(words[index - 15] , 18 )
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2] , 17 )
                        ^ self.ror(words[index - 2] , 19 )
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000
                # Compression
                S1 = self.ror(e , 6 ) ^ self.ror(e , 11 ) ^ self.ror(e , 25 )
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + S1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                S0 = self.ror(a , 2 ) ^ self.ror(a , 13 ) ^ self.ror(a , 22 )
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (S0 + maj) % 0x100000000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes )
            ]
        self.hash = "".join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
    def ror( self , value , rotations ):
        '''simple docstring'''
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest( unittest.TestCase ):
    """simple docstring"""

    def test_match_hashes( self ):
        '''simple docstring'''
        import hashlib

        data = bytes("Test String" , "utf-8" )
        self.assertEqual(SHA256(data ).hash , hashlib.sha256(data ).hexdigest() )
def main() -> None:
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s" , "--string" , dest="input_string" , default="Hello World!! Welcome to Cryptography" , help="Hash the string" , )
    parser.add_argument(
        "-f" , "--file" , dest="input_file" , help="Hash contents of a file" )
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , "rb" ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string , "utf-8" )
    print(SHA256(hash_input ).hash )


if __name__ == "__main__":
    main()
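# Sanity check (illustrative), using the well-known SHA-256 test vector:
#   SHA256(b"abc").hash ==
#   "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"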
| 709 |
from __future__ import annotations
def two_pointer( nums: list[int] , target: int ) -> list[int]:
    i = 0
    j = len(nums ) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f'''{two_pointer([2, 7, 1_1, 1_5], 9) = }''')
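# Note (illustrative): the two-pointer argument is only valid on a list that
# is already sorted in ascending order, e.g.
#   two_pointer([2, 7, 11, 15], 9) == [0, 1]   # 2 + 7 == 9
#   two_pointer([2, 7, 11, 15], 100) == []     # no pair sums to 100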
| 185 | 0 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
class EncoderDecoderConfig( PretrainedConfig ):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__( self , **kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs )
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("""encoder""" )
        encoder_model_type = encoder_config.pop("""model_type""" )
        decoder_config = kwargs.pop("""decoder""" )
        decoder_model_type = decoder_config.pop("""model_type""" )

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs( cls , encoder_config , decoder_config , **kwargs ):
        '''simple docstring'''
        logger.info("""Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )

    def to_dict( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["""encoder"""] = self.encoder.to_dict()
        output["""decoder"""] = self.decoder.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
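# Usage sketch (illustrative, assuming the standard transformers API):
#   from transformers import BertConfig
#   config = EncoderDecoderConfig.from_encoder_decoder_configs(
#       BertConfig(), BertConfig()
#   )
#   assert config.decoder.is_decoder and config.decoder.add_cross_attention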
| 581 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Optional[Any] = {
"""task_specific_params""": {
"""summarization""": {"""length_penalty""": 1.0, """max_length""": 128, """min_length""": 12, """num_beams""": 4},
"""summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 142, """min_length""": 56, """num_beams""": 4},
"""summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 62, """min_length""": 11, """num_beams""": 6},
}
}
__a : List[Any] = {
"""task_specific_params.summarization.length_penalty""": 1.0,
"""task_specific_params.summarization.max_length""": 128,
"""task_specific_params.summarization.min_length""": 12,
"""task_specific_params.summarization.num_beams""": 4,
"""task_specific_params.summarization_cnn.length_penalty""": 2.0,
"""task_specific_params.summarization_cnn.max_length""": 142,
"""task_specific_params.summarization_cnn.min_length""": 56,
"""task_specific_params.summarization_cnn.num_beams""": 4,
"""task_specific_params.summarization_xsum.length_penalty""": 1.0,
"""task_specific_params.summarization_xsum.max_length""": 62,
"""task_specific_params.summarization_xsum.min_length""": 11,
"""task_specific_params.summarization_xsum.num_beams""": 6,
}
self.assertEqual(flatten_dict(_lowercase ) , _lowercase )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Union[str, Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(_lowercase ) , x.transpose() ) )
__a : Union[str, Any] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(_lowercase , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : int = np.random.randn(3 , 4 )
__a : Optional[Any] = torch.tensor(_lowercase )
self.assertTrue(np.allclose(transpose(_lowercase ) , transpose(_lowercase ).numpy() ) )
__a : Optional[Any] = np.random.randn(3 , 4 , 5 )
__a : Any = torch.tensor(_lowercase )
self.assertTrue(np.allclose(transpose(_lowercase , axes=(1, 2, 0) ) , transpose(_lowercase , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Optional[Any] = np.random.randn(3 , 4 )
__a : int = tf.constant(_lowercase )
self.assertTrue(np.allclose(transpose(_lowercase ) , transpose(_lowercase ).numpy() ) )
__a : Any = np.random.randn(3 , 4 , 5 )
__a : List[str] = tf.constant(_lowercase )
self.assertTrue(np.allclose(transpose(_lowercase , axes=(1, 2, 0) ) , transpose(_lowercase , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Union[str, Any] = np.random.randn(3 , 4 )
__a : Union[str, Any] = jnp.array(_lowercase )
self.assertTrue(np.allclose(transpose(_lowercase ) , np.asarray(transpose(_lowercase ) ) ) )
__a : Optional[int] = np.random.randn(3 , 4 , 5 )
__a : Union[str, Any] = jnp.array(_lowercase )
self.assertTrue(np.allclose(transpose(_lowercase , axes=(1, 2, 0) ) , np.asarray(transpose(_lowercase , axes=(1, 2, 0) ) ) ) )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Tuple = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(_lowercase , (4, 3) ) , np.reshape(_lowercase , (4, 3) ) ) )
__a : Optional[int] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(_lowercase , (12, 5) ) , np.reshape(_lowercase , (12, 5) ) ) )
@require_torch
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : List[Any] = np.random.randn(3 , 4 )
__a : List[Any] = torch.tensor(_lowercase )
self.assertTrue(np.allclose(reshape(_lowercase , (4, 3) ) , reshape(_lowercase , (4, 3) ).numpy() ) )
__a : List[str] = np.random.randn(3 , 4 , 5 )
__a : Any = torch.tensor(_lowercase )
self.assertTrue(np.allclose(reshape(_lowercase , (12, 5) ) , reshape(_lowercase , (12, 5) ).numpy() ) )
@require_tf
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : str = np.random.randn(3 , 4 )
__a : Dict = tf.constant(_lowercase )
self.assertTrue(np.allclose(reshape(_lowercase , (4, 3) ) , reshape(_lowercase , (4, 3) ).numpy() ) )
__a : Tuple = np.random.randn(3 , 4 , 5 )
__a : Optional[Any] = tf.constant(_lowercase )
self.assertTrue(np.allclose(reshape(_lowercase , (12, 5) ) , reshape(_lowercase , (12, 5) ).numpy() ) )
@require_flax
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : str = np.random.randn(3 , 4 )
__a : Tuple = jnp.array(_lowercase )
self.assertTrue(np.allclose(reshape(_lowercase , (4, 3) ) , np.asarray(reshape(_lowercase , (4, 3) ) ) ) )
__a : Tuple = np.random.randn(3 , 4 , 5 )
__a : Optional[int] = jnp.array(_lowercase )
self.assertTrue(np.allclose(reshape(_lowercase , (12, 5) ) , np.asarray(reshape(_lowercase , (12, 5) ) ) ) )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Tuple = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(_lowercase ) , np.squeeze(_lowercase ) ) )
__a : Optional[Any] = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(_lowercase , axis=2 ) , np.squeeze(_lowercase , axis=2 ) ) )
@require_torch
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Optional[int] = np.random.randn(1 , 3 , 4 )
__a : List[Any] = torch.tensor(_lowercase )
self.assertTrue(np.allclose(squeeze(_lowercase ) , squeeze(_lowercase ).numpy() ) )
__a : Optional[Any] = np.random.randn(1 , 4 , 1 , 5 )
__a : str = torch.tensor(_lowercase )
self.assertTrue(np.allclose(squeeze(_lowercase , axis=2 ) , squeeze(_lowercase , axis=2 ).numpy() ) )
@require_tf
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : int = np.random.randn(1 , 3 , 4 )
__a : Tuple = tf.constant(_lowercase )
self.assertTrue(np.allclose(squeeze(_lowercase ) , squeeze(_lowercase ).numpy() ) )
__a : Any = np.random.randn(1 , 4 , 1 , 5 )
__a : List[str] = tf.constant(_lowercase )
self.assertTrue(np.allclose(squeeze(_lowercase , axis=2 ) , squeeze(_lowercase , axis=2 ).numpy() ) )
@require_flax
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Tuple = np.random.randn(1 , 3 , 4 )
__a : Union[str, Any] = jnp.array(_lowercase )
self.assertTrue(np.allclose(squeeze(_lowercase ) , np.asarray(squeeze(_lowercase ) ) ) )
__a : Any = np.random.randn(1 , 4 , 1 , 5 )
__a : Optional[Any] = jnp.array(_lowercase )
self.assertTrue(np.allclose(squeeze(_lowercase , axis=2 ) , np.asarray(squeeze(_lowercase , axis=2 ) ) ) )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : List[Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(_lowercase , axis=1 ) , np.expand_dims(_lowercase , axis=1 ) ) )
@require_torch
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Any = np.random.randn(3 , 4 )
__a : Any = torch.tensor(_lowercase )
self.assertTrue(np.allclose(expand_dims(_lowercase , axis=1 ) , expand_dims(_lowercase , axis=1 ).numpy() ) )
@require_tf
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : int = np.random.randn(3 , 4 )
__a : str = tf.constant(_lowercase )
self.assertTrue(np.allclose(expand_dims(_lowercase , axis=1 ) , expand_dims(_lowercase , axis=1 ).numpy() ) )
@require_flax
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Tuple = np.random.randn(3 , 4 )
__a : Optional[Any] = jnp.array(_lowercase )
self.assertTrue(np.allclose(expand_dims(_lowercase , axis=1 ) , np.asarray(expand_dims(_lowercase , axis=1 ) ) ) )
| 581 | 1 |
def fizz_buzz( number: int , iterations: int ) -> str:
    if not isinstance(iterations , int ):
        raise ValueError('''iterations must be defined as integers''' )
    if not isinstance(number , int ) or not number >= 1:
        raise ValueError(
            '''starting number must be an integer and be more than 0''' )
    if not iterations >= 1:
        raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' )

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number )

        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
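# Worked example (illustrative): the first 15 numbers produce
#   fizz_buzz(1, 15) == "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz "
# (note the trailing space appended after every item).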
| 706 |
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
a_ = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
a_ = direct_transformers_import(PATH_TO_TRANSFORMERS)
a_ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
a_ = re.compile(R"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
a_ = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def a__ ( _UpperCamelCase : Union[str, Any] ):
__lowerCamelCase = None
# source code of `config_class`
__lowerCamelCase = inspect.getsource(_UpperCamelCase )
__lowerCamelCase = _re_checkpoint.findall(_UpperCamelCase )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('''/''' ):
__lowerCamelCase = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
__lowerCamelCase = F"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
__lowerCamelCase = ckpt_name
break
return checkpoint
def a__ ( ):
__lowerCamelCase = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
__lowerCamelCase = get_checkpoint_from_config_class(_UpperCamelCase )
__lowerCamelCase = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
__lowerCamelCase = '''\n'''.join(sorted(_UpperCamelCase ) )
raise ValueError(F"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
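# For illustration (not part of the original file): the checkpoint regex pulls
# (name, link) pairs out of a config docstring, e.g.
#   _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
#   -> [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]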
| 622 | 0 |
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal() -> None:
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes , edges )
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    assert sorted(result ) == sorted(expected ) | 228 |
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
lowerCamelCase : List[Any] =logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]:
return [
int(1000 * (box[0] / width) ),
int(1000 * (box[1] / height) ),
int(1000 * (box[2] / width) ),
int(1000 * (box[3] / height) ),
]
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None ) -> str:
UpperCamelCase__ : Any = tesseract_config if tesseract_config is not None else ""
# apply OCR
UpperCamelCase__ : int = to_pil_image(__lowerCAmelCase )
UpperCamelCase__ , UpperCamelCase__ : Dict = pil_image.size
UpperCamelCase__ : Optional[Any] = pytesseract.image_to_data(__lowerCAmelCase , lang=__lowerCAmelCase , output_type="dict" , config=__lowerCAmelCase )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : List[str] = data["text"], data["left"], data["top"], data["width"], data["height"]
# filter empty words and corresponding coordinates
UpperCamelCase__ : Tuple = [idx for idx, word in enumerate(__lowerCAmelCase ) if not word.strip()]
UpperCamelCase__ : Tuple = [word for idx, word in enumerate(__lowerCAmelCase ) if idx not in irrelevant_indices]
UpperCamelCase__ : Union[str, Any] = [coord for idx, coord in enumerate(__lowerCAmelCase ) if idx not in irrelevant_indices]
UpperCamelCase__ : List[str] = [coord for idx, coord in enumerate(__lowerCAmelCase ) if idx not in irrelevant_indices]
UpperCamelCase__ : Union[str, Any] = [coord for idx, coord in enumerate(__lowerCAmelCase ) if idx not in irrelevant_indices]
UpperCamelCase__ : str = [coord for idx, coord in enumerate(__lowerCAmelCase ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
UpperCamelCase__ : List[Any] = []
for x, y, w, h in zip(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ : Optional[int] = [x, y, x + w, y + h]
actual_boxes.append(__lowerCAmelCase )
# finally, normalize the bounding boxes
UpperCamelCase__ : int = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
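# For illustration (values assumed, not from the original file): normalize_box
# rescales pixel boxes to the 0-1000 grid LayoutLM-style models expect, e.g.
# for a 200x100 image, normalize_box([20, 10, 100, 50], 200, 100) -> [100, 100, 500, 500].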
class __a ( A__ ):
_lowerCAmelCase : int = ['''pixel_values''']
def __init__( self : Dict , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Dict[str, int] = None , SCREAMING_SNAKE_CASE : PILImageResampling = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Optional[str] = None , SCREAMING_SNAKE_CASE : Optional[str] = "" , **SCREAMING_SNAKE_CASE : Any , ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE )
UpperCamelCase__ : str = size if size is not None else {"height": 2_24, "width": 2_24}
UpperCamelCase__ : str = get_size_dict(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = do_resize
UpperCamelCase__ : Union[str, Any] = size
UpperCamelCase__ : List[str] = resample
UpperCamelCase__ : Dict = apply_ocr
UpperCamelCase__ : str = ocr_lang
UpperCamelCase__ : List[str] = tesseract_config
def __lowercase ( self : List[str] , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : Dict[str, int] , SCREAMING_SNAKE_CASE : PILImageResampling = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE : Dict , ):
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = get_size_dict(SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(F'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
UpperCamelCase__ : Union[str, Any] = (size["height"], size["width"])
return resize(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : ImageInput , SCREAMING_SNAKE_CASE : bool = None , SCREAMING_SNAKE_CASE : Dict[str, int] = None , SCREAMING_SNAKE_CASE : PILImageResampling = None , SCREAMING_SNAKE_CASE : bool = None , SCREAMING_SNAKE_CASE : Optional[str] = None , SCREAMING_SNAKE_CASE : Optional[str] = None , SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE : ChannelDimension = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE : Tuple , ):
'''simple docstring'''
UpperCamelCase__ : List[str] = do_resize if do_resize is not None else self.do_resize
UpperCamelCase__ : Tuple = size if size is not None else self.size
UpperCamelCase__ : Any = get_size_dict(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = resample if resample is not None else self.resample
UpperCamelCase__ : Any = apply_ocr if apply_ocr is not None else self.apply_ocr
UpperCamelCase__ : Any = ocr_lang if ocr_lang is not None else self.ocr_lang
UpperCamelCase__ : str = tesseract_config if tesseract_config is not None else self.tesseract_config
UpperCamelCase__ : Dict = make_list_of_images(SCREAMING_SNAKE_CASE )
if not valid_images(SCREAMING_SNAKE_CASE ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
# All transformations expect numpy arrays.
UpperCamelCase__ : Optional[int] = [to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images]
if apply_ocr:
requires_backends(self , "pytesseract" )
UpperCamelCase__ : Dict = []
UpperCamelCase__ : List[Any] = []
for image in images:
UpperCamelCase__ , UpperCamelCase__ : Any = apply_tesseract(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
words_batch.append(SCREAMING_SNAKE_CASE )
boxes_batch.append(SCREAMING_SNAKE_CASE )
if do_resize:
UpperCamelCase__ : Union[str, Any] = [self.resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
UpperCamelCase__ : Any = [flip_channel_order(SCREAMING_SNAKE_CASE ) for image in images]
UpperCamelCase__ : str = [to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images]
UpperCamelCase__ : Optional[Any] = BatchFeature(data={"pixel_values": images} , tensor_type=SCREAMING_SNAKE_CASE )
if apply_ocr:
UpperCamelCase__ : Tuple = words_batch
UpperCamelCase__ : Dict = boxes_batch
return data | 228 | 1 |
def is_automorphic_number( number: int ) -> bool:
    '''simple docstring'''
    if not isinstance(number , int ):
        msg = F"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg )
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
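# Worked examples (illustrative): 76 is automorphic because 76**2 == 5776
# ends in 76, so is_automorphic_number(76) is True, while
# is_automorphic_number(7) is False since 7**2 == 49 does not end in 7.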
| 703 |
from math import factorial
def combinations( n: int , k: int ) -> int:
    '''simple docstring'''
    if n < k or k < 0:
        raise ValueError('''Please enter positive integers for n and k where n >= k''' )
    return factorial(n ) // (factorial(k ) * factorial(n - k ))


if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f'''fifty-two card deck is: {combinations(52, 5)}\n''',
    )

    print(
        "If a class of 40 students must be arranged into groups of",
        f'''4 for group projects, there are {combinations(40, 4)} ways''',
        "to arrange them.\n",
    )

    print(
        "If 10 teams are competing in a Formula One race, there",
        f'''are {combinations(10, 3)} ways that first, second and''',
        "third place can be awarded.",
    )
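# Quick check (illustrative): combinations(5, 2) == 10, since
# 5! // (2! * 3!) == 120 // (2 * 6) == 10.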
| 391 | 0 |
from manim import *
class __UpperCamelCase ( _lowerCAmelCase ):
def _a ( self : str ) -> Tuple:
"""simple docstring"""
__lowercase = Rectangle(height=0.5 , width=0.5 )
__lowercase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__lowercase = Rectangle(height=0.25 , width=0.25 )
__lowercase = [mem.copy() for i in range(6 )]
__lowercase = [mem.copy() for i in range(6 )]
__lowercase = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
__lowercase = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
__lowercase = VGroup(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
__lowercase = Text("""CPU""" , font_size=24 )
__lowercase = Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_lowerCAmelCase )
__lowercase = [mem.copy() for i in range(4 )]
__lowercase = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
__lowercase = Text("""GPU""" , font_size=24 )
__lowercase = Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(_lowerCAmelCase )
__lowercase = [mem.copy() for i in range(6 )]
__lowercase = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
__lowercase = Text("""Model""" , font_size=24 )
__lowercase = Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(_lowerCAmelCase )
__lowercase = []
__lowercase = []
for i, rect in enumerate(_lowerCAmelCase ):
__lowercase = fill.copy().set_fill(_lowerCAmelCase , opacity=0.8 )
target.move_to(_lowerCAmelCase )
model_arr.append(_lowerCAmelCase )
__lowercase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_lowerCAmelCase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(_lowerCAmelCase )
self.add(*_lowerCAmelCase , *_lowerCAmelCase )
__lowercase = [meta_mem.copy() for i in range(6 )]
__lowercase = [meta_mem.copy() for i in range(6 )]
__lowercase = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
__lowercase = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
__lowercase = VGroup(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
__lowercase = Text("""Disk""" , font_size=24 )
__lowercase = Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase )
disk.move_to([-4, -1.25, 0] )
self.add(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__lowercase = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
# NOTE: the identifiers below were reconstructed from obfuscated placeholders
# (`_lowerCAmelCase`, `__lowercase`); the color and direction constants
# (RED, ORANGE, LEFT, UP, RIGHT) and the final FadeOut targets are assumptions,
# not recovered from the source.
blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
self.add(blue_text)
step_a = MarkupText(
    "Now watch as an input is passed through the model\nand how the memory is utilized and handled.",
    font_size=24,
)
step_a.move_to([2, 2, 0])
self.play(Write(step_a))
input = Square(0.3)
input.set_fill(RED, opacity=1.0)
input.set_stroke(width=0.0)
input.next_to(model_base[0], LEFT, buff=0.5)
self.play(Write(input))
input.generate_target()
input.target.next_to(model_arr[0], direction=LEFT, buff=0.02)
self.play(MoveToTarget(input))
self.play(FadeOut(step_a))
a = Arrow(start=UP, end=DOWN, color=RED, buff=0.5)
a.next_to(model_arr[0].get_left(), UP, buff=0.2)
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0])
step_a = MarkupText(
    "As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.",
    font_size=24,
)
step_a.move_to([2, 2, 0])
self.play(Write(step_a, run_time=3))
circ_kwargs = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}
self.play(
    Write(a),
    Circumscribe(model_arr[0], color=ORANGE, **circ_kwargs),
    Circumscribe(model_cpu_arr[0], color=ORANGE, **circ_kwargs),
    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
)
self.play(MoveToTarget(model_cpu_arr[0]))
a_c = a.copy()
for i in range(6):
    a_c.next_to(model_arr[i].get_right() + 0.02, UP, buff=0.2)
    input.generate_target()
    input.target.move_to(model_arr[i].get_right() + 0.02)
    grp = AnimationGroup(
        FadeOut(a, run_time=0.5),
        MoveToTarget(input, run_time=0.5),
        FadeIn(a_c, run_time=0.5),
        lag_ratio=0.2,
    )
    self.play(grp)
    model_cpu_arr[i].generate_target()
    model_cpu_arr[i].target.move_to(cpu_left_col_base[i])
    if i < 5:
        model_cpu_arr[i + 1].generate_target()
        model_cpu_arr[i + 1].target.move_to(gpu_rect[0])
        if i >= 1:
            circ_kwargs["run_time"] = 0.7
        self.play(
            Circumscribe(model_arr[i], **circ_kwargs),
            Circumscribe(cpu_left_col_base[i], **circ_kwargs),
            Circumscribe(cpu_left_col_base[i + 1], color=ORANGE, **circ_kwargs),
            Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
            Circumscribe(model_arr[i + 1], color=ORANGE, **circ_kwargs),
        )
        if i < 1:
            self.play(
                MoveToTarget(model_cpu_arr[i]),
                MoveToTarget(model_cpu_arr[i + 1]),
            )
        else:
            self.play(
                MoveToTarget(model_cpu_arr[i], run_time=0.7),
                MoveToTarget(model_cpu_arr[i + 1], run_time=0.7),
            )
    else:
        model_cpu_arr[i].generate_target()
        model_cpu_arr[i].target.move_to(cpu_left_col_base[-1])
        input.generate_target()
        input.target.next_to(model_arr[-1].get_right(), RIGHT + 0.02, buff=0.2)
        self.play(
            Circumscribe(model_arr[-1], color=ORANGE, **circ_kwargs),
            Circumscribe(cpu_left_col_base[-1], color=ORANGE, **circ_kwargs),
            Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
        )
        self.play(MoveToTarget(model_cpu_arr[i]))
a = a_c
a_c = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1], RIGHT + 0.02, buff=0.5)
self.play(
    FadeOut(a),
    FadeOut(step_a, run_time=0.5),
)
step_a = MarkupText(
    "Inference on a model too large for GPU memory\nis successfully completed.", font_size=24
)
step_a.move_to([2, 2, 0])
self.play(Write(step_a, run_time=3), MoveToTarget(input))
self.wait()
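The scene above animates layer-by-layer offloading during big-model inference. As a rough illustration of the mechanism it depicts (a minimal sketch, not the accelerate implementation; it assumes PyTorch and a CUDA device are available):

import torch


def forward_with_offload(layers, x, device="cuda"):
    # Keep only one layer resident on the GPU at a time: move its weights in
    # just before it runs, then evict them back to the CPU afterwards.
    x = x.to(device)
    for layer in layers:
        layer.to(device)
        x = layer(x)
        layer.to("cpu")
    return x

Only one layer's weights occupy GPU memory at any moment, which is what the squares moving between the CPU and GPU rectangles represent.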
| 80 |
def solution(n=2_000_000):
    """Return the sum of all primes below ``n`` (Project Euler problem 10)."""
    # Sieve of Eratosthenes: 0 marks a prime candidate, 1 marks a composite.
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            # Strike out every multiple of i, starting from i * i.
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes


if __name__ == "__main__":
    print(F'''{solution() = }''')
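A quick sanity check for the sieve (an illustrative addition, not part of the original source; 17 is the sum of the primes below 10, and 142913828922 is the published answer to Project Euler problem 10):

assert solution(10) == 17  # 2 + 3 + 5 + 7
assert solution() == 142913828922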
| 80 | 1 |
"""simple docstring"""
def __snake_case ( SCREAMING_SNAKE_CASE: Any , SCREAMING_SNAKE_CASE: List[Any] , SCREAMING_SNAKE_CASE: Dict , SCREAMING_SNAKE_CASE: List[Any] ):
"""simple docstring"""
if height >= 1:
move_tower(height - 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
move_disk(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
move_tower(height - 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __snake_case ( SCREAMING_SNAKE_CASE: List[str] , SCREAMING_SNAKE_CASE: Tuple ):
"""simple docstring"""
print('moving disk from' , SCREAMING_SNAKE_CASE , 'to' , SCREAMING_SNAKE_CASE )
def __snake_case ( ):
"""simple docstring"""
_lowerCAmelCase = int(input('Height of hanoi: ' ).strip() )
move_tower(SCREAMING_SNAKE_CASE , 'A' , 'B' , 'C' )
if __name__ == "__main__":
main()
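The recursion above performs 2**height - 1 disk moves in total; a small companion check (an illustrative addition, not part of the original source):

def count_moves(height):
    # T(h) = 2 * T(h - 1) + 1 with T(0) = 0, which closes to 2**h - 1.
    return 2**height - 1


assert count_moves(3) == 7  # a 3-disk tower prints exactly seven moves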
| 491 |
"""simple docstring"""
def __snake_case ( SCREAMING_SNAKE_CASE: int ):
"""simple docstring"""
_lowerCAmelCase = generate_pascal_triangle(SCREAMING_SNAKE_CASE )
for row_idx in range(SCREAMING_SNAKE_CASE ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=' ' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=' ' )
else:
print(triangle[row_idx][col_idx] , end='' )
print()
def __snake_case ( SCREAMING_SNAKE_CASE: int ):
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
_lowerCAmelCase = []
for current_row_idx in range(SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = populate_current_row(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
triangle.append(SCREAMING_SNAKE_CASE )
return triangle
def __snake_case ( SCREAMING_SNAKE_CASE: list[list[int]] , SCREAMING_SNAKE_CASE: int ):
"""simple docstring"""
_lowerCAmelCase = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
_lowerCAmelCase , _lowerCAmelCase = 1, 1
for current_col_idx in range(1 , SCREAMING_SNAKE_CASE ):
calculate_current_element(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return current_row
def __snake_case ( SCREAMING_SNAKE_CASE: list[list[int]] , SCREAMING_SNAKE_CASE: list[int] , SCREAMING_SNAKE_CASE: int , SCREAMING_SNAKE_CASE: int , ):
"""simple docstring"""
_lowerCAmelCase = triangle[current_row_idx - 1][current_col_idx - 1]
_lowerCAmelCase = triangle[current_row_idx - 1][current_col_idx]
_lowerCAmelCase = above_to_left_elt + above_to_right_elt
def __snake_case ( SCREAMING_SNAKE_CASE: int ):
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
_lowerCAmelCase = [[1]]
for row_index in range(1 , SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = [0] + result[-1] + [0]
_lowerCAmelCase = row_index + 1
# Calculate the number of distinct elements in a row
_lowerCAmelCase = sum(divmod(SCREAMING_SNAKE_CASE , 2 ) )
_lowerCAmelCase = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
_lowerCAmelCase = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
_lowerCAmelCase = row_first_half + row_second_half
result.append(SCREAMING_SNAKE_CASE )
return result
def __snake_case ( ):
"""simple docstring"""
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(SCREAMING_SNAKE_CASE: Callable , SCREAMING_SNAKE_CASE: int ) -> None:
_lowerCAmelCase = f"""{func.__name__}({value})"""
_lowerCAmelCase = timeit(f"""__main__.{call}""" , setup='import __main__' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(f"""{call:38} -- {timing:.4f} seconds""" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
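A brief usage sketch for the two generators above (an illustrative addition, not part of the original source):

assert generate_pascal_triangle(5) == [
    [1],
    [1, 1],
    [1, 2, 1],
    [1, 3, 3, 1],
    [1, 4, 6, 4, 1],
]
assert generate_pascal_triangle(7) == generate_pascal_triangle_optimized(7)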
| 491 | 1 |