from heapq import heappop, heappush
import numpy as np
def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float | int, list[tuple[int, int]]]:
    """
    Dijkstra's algorithm on a binary grid: cells equal to 1 are walkable and
    cells equal to 0 are obstacles. Returns the shortest distance from source
    to destination and the corresponding path.
    """
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
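
# A minimal usage sketch (the all-ones 3x3 grid is an assumption for illustration,
# not part of the original file): with diagonal moves enabled, the shortest route
# from corner to corner takes two steps.
# >>> dijkstra(np.ones((3, 3)), (0, 0), (2, 2), allow_diagonal=True)
# (2.0, [(0, 0), (1, 1), (2, 2)])
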
if __name__ == "__main__":
    import doctest

    doctest.testmod()

import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import BertGenerationDecoder, BertGenerationEncoder


class BertGenerationEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(
        self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)


@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))


@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
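
# A hedged invocation sketch (the script name and flag below are placeholders):
#   python xla_spawn.py --num_cores 8 your_training_script.py --your_arg value
# The launched script must expose an `_mp_fn` entry point, since xmp.spawn
# targets `mod._mp_fn` above.
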
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
    import jax
    import jaxlib

logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None


class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str():
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
from typing import Optional, Union

import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config


logger = logging.get_logger(__name__)


# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"


MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]


def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """
    A map of modules from TF to PyTorch.
    """

    tf_to_pt_map = {}

    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map


def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model


def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """
    Apply TensorFlow-style "SAME" padding to a convolution layer.
    """
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
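
# A small worked example of the "SAME" padding arithmetic above (shapes are
# assumptions for illustration): for a 7-pixel dimension with stride 2 and kernel 3,
# 7 % 2 == 1, so pad_along = max(3 - 1, 0) = 2, split as 1 before and 1 after.
# conv = nn.Conv2d(3, 8, kernel_size=3, stride=2)
# x = torch.randn(1, 3, 7, 7)
# apply_tf_padding(x, conv).shape  # -> torch.Size([1, 3, 9, 9])
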
class MobileNetV1ConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetV1Config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[bool or str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features


class MobileNetV1PreTrainedModel(PreTrainedModel):
    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1Model(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetV1ConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # depthwise convolution (groups == channels), then 1x1 pointwise convolution
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )

            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )


@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g.
    for ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_v1 = MobileNetV1Model(config)

        last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )

from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
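
# With the lazy module installed in sys.modules, attribute access triggers the real
# import on first use, so (a sketch) `from transformers import InformerConfig` only
# loads the underlying configuration module when it is actually needed.
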
from __future__ import annotations

import os
from typing import Any

import requests

BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """
    Fetch GitHub info of a user using the requests module.
    """
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()


if __name__ == "__main__":  # pragma: no cover
    if USER_TOKEN:
        for key, value in fetch_github_info(USER_TOKEN).items():
            print(f"{key}: {value}")
    else:
        raise ValueError("'USER_TOKEN' field cannot be empty.")
import random

import torch
from huggingface_hub import HfApi

from diffusers import UNet2DModel


api = HfApi()

results = {}
# fmt: off
UpperCamelCase_ = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
UpperCamelCase_ = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
UpperCamelCase_ = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
UpperCamelCase_ = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
UpperCamelCase_ = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
UpperCamelCase_ = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
UpperCamelCase_ = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
UpperCamelCase_ = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
UpperCamelCase_ = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
UpperCamelCase_ = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
UpperCamelCase_ = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
UpperCamelCase_ = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
UpperCamelCase_ = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
UpperCamelCase_ = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
UpperCamelCase_ = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]

        print(f"Started running {mod.modelId}!!!")

        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)

        torch.manual_seed(0)
        random.seed(0)

        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample

        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"{mod.modelId} has passed successfully!!!")
import inspect
import unittest

import torch
import torch.nn as nn

from accelerate.hooks import (
    AlignDevicesHook,
    ModelHook,
    SequentialHook,
    add_hook_to_module,
    attach_align_device_hook,
    remove_hook_from_module,
    remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1


class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)

        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)

    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)

    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)

    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))

    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
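
# Outside the test-suite, the same machinery is usually reached through the public
# helper (a sketch; the model and device values are assumptions):
# from accelerate.hooks import attach_align_device_hook
# model = ModelForTest()
# attach_align_device_hook(model, execution_device="cpu", offload=True)
# model(torch.randn(2, 3))  # offloaded weights are restored per forward pass
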
'''simple docstring'''
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
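# NOTE: full determinism is enabled so that the expected_max_diff assertions in the tests
# below compare reproducible pipeline outputs instead of flaky nondeterministic kernels.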
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        torch.manual_seed(0)

        def init_weights(m):
            # Give the ControlNet zero-convs non-trivial weights so the two copies produce
            # visibly different outputs in the tests below.
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        controlnet1.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        controlnet2.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        controlnet = MultiControlNetModel([controlnet1, controlnet2])

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_2 - output_3)) > 1e-3
        assert np.sum(np.abs(output_3 - output_4)) > 1e-3
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))

        output = pipe(
            prompt, image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=50, strength=0.6, )
        image = output.images[0]
        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )
        assert np.abs(expected_image - image).max() < 9e-2
| 591 | 0 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
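# The map() above adds the `ratio_char_token` column computed in tokenize(): characters
# per token, a quick sanity check on how well the tokenizer compresses this corpus.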
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 702 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_pad=True, ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        # DETR resizes so the shortest edge matches size["shortest_edge"] while keeping
        # the aspect ratio; batched inputs are padded up to the largest height/width.
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "rescale_factor"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_pad"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 108 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0
def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output
def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output
def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output
def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """
    ZigZag traverse: alternate left-to-right and right-to-left per level.
    """
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output
def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 417 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 348 | 0 |
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2) | 307 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    # Load the old-structure model and the new-structure model side by side; the
    # `loading_info` tells us which keys still need to be copied over manually.
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
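    # Walk every key the new model reported as missing and copy the corresponding tensor
    # over from the old checkpoint, translating attribute names through `mapping` on the way.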
    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model, attribute) and len(attribute) > 0:
                old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                # The old checkpoint fuses q/k/v into one in_proj tensor; slice it apart.
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"

                if attribute == "query_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(old_attribute)]
            else:
                model = getattr(model, attribute)
                if old_attribute != "":
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path) | 307 | 1 |
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
    pipeline_model_mapping = (
{
'feature-extraction': TFDistilBertModel,
'fill-mask': TFDistilBertForMaskedLM,
'question-answering': TFDistilBertForQuestionAnswering,
'text-classification': TFDistilBertForSequenceClassification,
'token-classification': TFDistilBertForTokenClassification,
'zero-shot': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
@slow
    def test_output_embeds_base_model(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 683 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
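# These tests fetch dataset and metric metadata from the Hugging Face Hub, hence the
# module-wide integration mark below.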
pytestmark = pytest.mark.integration
@pytest.mark.parametrize('''path''' ,['''paws''', '''csv'''] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' ,['''accuracy'''] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' ,[
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] ,)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' ,[
('''paws''', None, ValueError),
] ,)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
'''path, expected''' ,[
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] ,)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' ,[
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] ,)
def test_get_dataset_infos(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' ,[
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] ,)
def test_get_dataset_info(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' ,[
('''paws''', None, ValueError),
] ,)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 683 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class CvtConfig(PretrainedConfig):
    r"""
    Configuration class to store the configuration of a [`CvtModel`].
    """

    model_type = "cvt"

    def __init__(self, num_channels=3, patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], embed_dim=[64, 192, 384], num_heads=[1, 3, 6], depth=[1, 2, 10], mlp_ratio=[4.0, 4.0, 4.0], attention_drop_rate=[0.0, 0.0, 0.0], drop_rate=[0.0, 0.0, 0.0], drop_path_rate=[0.0, 0.0, 0.1], qkv_bias=[True, True, True], cls_token=[False, False, True], qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"], kernel_qkv=[3, 3, 3], padding_kv=[1, 1, 1], stride_kv=[2, 2, 2], padding_q=[1, 1, 1], stride_q=[1, 1, 1], initializer_range=0.02, layer_norm_eps=1e-12, **kwargs, ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps | 721 |
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""") | 531 | 0 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(n: int) -> int:
    """
    Mobius function of n: -1 if n is square-free with an odd number of prime
    factors, 1 if square-free with an even number, and 0 otherwise.
    """
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 384 |
import qiskit
def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)

    for i in range(1, qubits):
        # Adding CX (CNOT) gate to entangle qubit i with qubit i - 1
        circuit.cx(i - 1, i)

    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))

    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F'Total count for various states are: {quantum_entanglement(3)}')
| 384 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
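# The PriorTransformer below predicts a CLIP image embedding from a CLIP text embedding
# and a timestep; it is the denoising model of unCLIP-style prior pipelines.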
@dataclass
class PriorTransformerOutput(BaseOutput):
    """
    The output of [`PriorTransformer`]: the predicted CLIP image embedding.
    """

    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    """
    A Prior Transformer model that maps CLIP text embeddings (plus a timestep) to CLIP
    image embeddings.
    """
    @register_to_config
    def __init__(self, num_attention_heads: int = 32, attention_head_dim: int = 64, num_layers: int = 20, embedding_dim: int = 768, num_embeddings=77, additional_embeddings=4, dropout: float = 0.0, time_embed_act_fn: str = "silu", norm_in_type: Optional[str] = None, embedding_proj_norm_type: Optional[str] = None, encoder_hid_proj_type: Optional[str] = "linear", added_emb_type: Optional[str] = "prd", time_embed_dim: Optional[int] = None, embedding_proj_dim: Optional[int] = None, clip_embed_dim: Optional[int] = None, ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)
        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, activation_fn="gelu", attention_bias=True, )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)
        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        # Additive causal mask: large negative values above the diagonal block attention
        # from a position to any later position.
        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowerCamelCase__ ( self ):
_lowercase : str = {}
def fn_recursive_add_processors(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
if hasattr(UpperCAmelCase_ ,"""set_processor""" ):
_lowercase : str = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"""{name}.{sub_name}""" ,UpperCAmelCase_ ,UpperCAmelCase_ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ )
return processors
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : List[str] = len(self.attn_processors.keys() )
if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ) and len(UpperCAmelCase_ ) != count:
raise ValueError(
f"""A dict of processors was passed, but the number of processors {len(UpperCAmelCase_ )} does not match the"""
f""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
if hasattr(UpperCAmelCase_ ,"""set_processor""" ):
if not isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ):
module.set_processor(UpperCAmelCase_ )
else:
module.set_processor(processor.pop(f"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"""{name}.{sub_name}""" ,UpperCAmelCase_ ,UpperCAmelCase_ )
for name, module in self.named_children():
fn_recursive_attn_processor(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ )
def lowerCamelCase__ ( self ):
self.set_attn_processor(AttnProcessor() )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = True ,):
_lowercase : str = hidden_states.shape[0]
_lowercase : Any = timestep
if not torch.is_tensor(UpperCAmelCase_ ):
_lowercase : List[Any] = torch.tensor([timesteps] ,dtype=torch.long ,device=hidden_states.device )
elif torch.is_tensor(UpperCAmelCase_ ) and len(timesteps.shape ) == 0:
_lowercase : Optional[Any] = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_lowercase : str = timesteps * torch.ones(UpperCAmelCase_ ,dtype=timesteps.dtype ,device=timesteps.device )
_lowercase : Optional[Any] = self.time_proj(UpperCAmelCase_ )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_lowercase : Dict = timesteps_projected.to(dtype=self.dtype )
_lowercase : Dict = self.time_embedding(UpperCAmelCase_ )
if self.embedding_proj_norm is not None:
_lowercase : Union[str, Any] = self.embedding_proj_norm(UpperCAmelCase_ )
_lowercase : Union[str, Any] = self.embedding_proj(UpperCAmelCase_ )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_lowercase : str = self.encoder_hidden_states_proj(UpperCAmelCase_ )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""" )
_lowercase : Tuple = self.proj_in(UpperCAmelCase_ )
_lowercase : Tuple = self.positional_embedding.to(hidden_states.dtype )
_lowercase : Optional[Any] = []
_lowercase : Union[str, Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(UpperCAmelCase_ )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_lowercase : str = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_lowercase : Optional[Any] = hidden_states[:, None, :]
_lowercase : int = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_lowercase : Dict = self.prd_embedding.to(hidden_states.dtype ).expand(UpperCAmelCase_ ,-1 ,-1 )
additional_embeds.append(UpperCAmelCase_ )
_lowercase : str = torch.cat(
UpperCAmelCase_ ,dim=1 ,)
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
_lowercase : Optional[int] = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_lowercase : List[Any] = F.pad(
UpperCAmelCase_ ,(
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) ,value=0.0 ,)
_lowercase : Optional[Any] = hidden_states + positional_embeddings
if attention_mask is not None:
_lowercase : str = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0
_lowercase : Optional[Any] = F.pad(UpperCAmelCase_ ,(0, self.additional_embeddings) ,value=0.0 )
_lowercase : List[Any] = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_lowercase : Union[str, Any] = attention_mask.repeat_interleave(self.config.num_attention_heads ,dim=0 )
if self.norm_in is not None:
_lowercase : Tuple = self.norm_in(UpperCAmelCase_ )
for block in self.transformer_blocks:
_lowercase : Optional[int] = block(UpperCAmelCase_ ,attention_mask=UpperCAmelCase_ )
_lowercase : str = self.norm_out(UpperCAmelCase_ )
if self.prd_embedding is not None:
_lowercase : Optional[int] = hidden_states[:, -1]
else:
_lowercase : List[Any] = hidden_states[:, additional_embeddings_len:]
_lowercase : Union[str, Any] = self.proj_to_clip_embeddings(UpperCAmelCase_ )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
_lowercase : List[str] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
"""simple docstring"""
"""simple docstring"""
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("""1.11""")
def onnx_export(model, model_args: tuple, output_path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False, ):
output_path.parent.mkdir(parents=__UpperCAmelCase , exist_ok=__UpperCAmelCase )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
__UpperCAmelCase , __UpperCAmelCase , f=output_path.as_posix() , input_names=__UpperCAmelCase , output_names=__UpperCAmelCase , dynamic_axes=__UpperCAmelCase , do_constant_folding=__UpperCAmelCase , use_external_data_format=__UpperCAmelCase , enable_onnx_checker=__UpperCAmelCase , opset_version=__UpperCAmelCase , )
else:
export(
__UpperCAmelCase , __UpperCAmelCase , f=output_path.as_posix() , input_names=__UpperCAmelCase , output_names=__UpperCAmelCase , dynamic_axes=__UpperCAmelCase , do_constant_folding=__UpperCAmelCase , opset_version=__UpperCAmelCase , )
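# Minimal standalone illustration of the export call wrapped above, for a toy
# module (the module and file name below are ours, not part of this script):
def _export_toy_example(output_file: str = "doubler.onnx"):
    class _Doubler(torch.nn.Module):
        def forward(self, x):
            return x * 2
    # torch.onnx.export traces the module with example inputs and writes the
    # graph; dynamic_axes marks dimensions that may vary at inference time.
    export(
        _Doubler(), (torch.randn(1, 3),), f=output_file,
        input_names=["x"], output_names=["y"],
        dynamic_axes={"x": {0: "batch"}},
        do_constant_folding=True, opset_version=14,
    )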
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = """cuda"""
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" )
    else:
        device = """cpu"""
_lowercase : List[Any] = StableDiffusionPipeline.from_pretrained(__UpperCAmelCase , torch_dtype=__UpperCAmelCase ).to(__UpperCAmelCase )
_lowercase : List[str] = Path(__UpperCAmelCase )
# TEXT ENCODER
_lowercase : Optional[int] = pipeline.text_encoder.config.max_position_embeddings
_lowercase : str = pipeline.text_encoder.config.hidden_size
_lowercase : Union[str, Any] = pipeline.tokenizer(
"""A sample prompt""" , padding="""max_length""" , max_length=pipeline.tokenizer.model_max_length , truncation=__UpperCAmelCase , return_tensors="""pt""" , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=__UpperCAmelCase , dtype=torch.intaa )) , output_path=output_path / """text_encoder""" / """model.onnx""" , ordered_input_names=["""input_ids"""] , output_names=["""last_hidden_state""", """pooler_output"""] , dynamic_axes={
"""input_ids""": {0: """batch""", 1: """sequence"""},
} , opset=__UpperCAmelCase , )
del pipeline.text_encoder
# UNET
_lowercase : int = pipeline.unet.config.in_channels
_lowercase : Optional[int] = pipeline.unet.config.sample_size
_lowercase : Any = output_path / """unet""" / """model.onnx"""
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase ),
torch.randn(2 ).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase ),
torch.randn(2 , __UpperCAmelCase , __UpperCAmelCase ).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase ),
False,
) , output_path=__UpperCAmelCase , ordered_input_names=["""sample""", """timestep""", """encoder_hidden_states""", """return_dict"""] , output_names=["""out_sample"""] , dynamic_axes={
"""sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
"""timestep""": {0: """batch"""},
"""encoder_hidden_states""": {0: """batch""", 1: """sequence"""},
} , opset=__UpperCAmelCase , use_external_data_format=__UpperCAmelCase , )
_lowercase : Optional[int] = str(unet_path.absolute().as_posix() )
_lowercase : str = os.path.dirname(__UpperCAmelCase )
_lowercase : Dict = onnx.load(__UpperCAmelCase )
# clean up existing tensor files
shutil.rmtree(__UpperCAmelCase )
os.mkdir(__UpperCAmelCase )
# collate external tensor files into one
onnx.save_model(
__UpperCAmelCase , __UpperCAmelCase , save_as_external_data=__UpperCAmelCase , all_tensors_to_one_file=__UpperCAmelCase , location="""weights.pb""" , convert_attribute=__UpperCAmelCase , )
del pipeline.unet
# VAE ENCODER
_lowercase : str = pipeline.vae
_lowercase : Dict = vae_encoder.config.in_channels
_lowercase : str = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
_lowercase : Dict = lambda __UpperCAmelCase , __UpperCAmelCase : vae_encoder.encode(__UpperCAmelCase , __UpperCAmelCase )[0].sample()
onnx_export(
__UpperCAmelCase , model_args=(
torch.randn(1 , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase ),
False,
) , output_path=output_path / """vae_encoder""" / """model.onnx""" , ordered_input_names=["""sample""", """return_dict"""] , output_names=["""latent_sample"""] , dynamic_axes={
"""sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
} , opset=__UpperCAmelCase , )
# VAE DECODER
_lowercase : List[str] = pipeline.vae
_lowercase : Dict = vae_decoder.config.latent_channels
_lowercase : Tuple = vae_decoder.config.out_channels
# forward only through the decoder part
_lowercase : List[Any] = vae_encoder.decode
onnx_export(
__UpperCAmelCase , model_args=(
torch.randn(1 , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase ),
False,
) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={
"""latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
} , opset=__UpperCAmelCase , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
_lowercase : str = pipeline.safety_checker
_lowercase : str = safety_checker.config.vision_config.num_channels
_lowercase : int = safety_checker.config.vision_config.image_size
_lowercase : Optional[Any] = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase ),
torch.randn(1 , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase ),
) , output_path=output_path / """safety_checker""" / """model.onnx""" , ordered_input_names=["""clip_input""", """images"""] , output_names=["""out_images""", """has_nsfw_concepts"""] , dynamic_axes={
"""clip_input""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
"""images""": {0: """batch""", 1: """height""", 2: """width""", 3: """channels"""},
} , opset=__UpperCAmelCase , )
del pipeline.safety_checker
_lowercase : int = OnnxRuntimeModel.from_pretrained(output_path / """safety_checker""" )
_lowercase : Any = pipeline.feature_extractor
else:
_lowercase : Optional[int] = None
_lowercase : int = None
_lowercase : Union[str, Any] = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / """vae_encoder""" ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / """vae_decoder""" ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / """text_encoder""" ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / """unet""" ) , scheduler=pipeline.scheduler , safety_checker=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(__UpperCAmelCase )
print("""ONNX pipeline saved to""" , __UpperCAmelCase )
del pipeline
del onnx_pipeline
_lowercase : Tuple = OnnxStableDiffusionPipeline.from_pretrained(__UpperCAmelCase , provider="""CPUExecutionProvider""" )
print("""ONNX pipeline is loadable""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_path""",
type=str,
required=True,
help="""Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).""",
)
parser.add_argument("""--output_path""", type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--opset""",
default=14,
type=int,
help="""The version of the ONNX operator set to use.""",
)
parser.add_argument("""--fp16""", action="""store_true""", default=False, help="""Export the models in `float16` mode""")
    args = parser.parse_args()
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( lowercase_ , unittest.TestCase ):
'''simple docstring'''
_snake_case = RobertaTokenizer
_snake_case = RobertaTokenizerFast
_snake_case = True
_snake_case = {'''cls_token''': '''<s>'''}
def A__ ( self ) -> Optional[Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowerCAmelCase = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
__lowerCAmelCase = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) )
__lowerCAmelCase = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
__lowerCAmelCase = {'unk_token': '<unk>'}
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__UpperCamelCase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__UpperCamelCase ) )
def A__ ( self , **snake_case_ ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__UpperCamelCase )
def A__ ( self , **snake_case_ ) -> int:
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **__UpperCamelCase )
def A__ ( self , snake_case_ ) -> Any:
__lowerCAmelCase = 'lower newer'
__lowerCAmelCase = 'lower newer'
return input_text, output_text
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__lowerCAmelCase = 'lower newer'
__lowerCAmelCase = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
__lowerCAmelCase = tokenizer.tokenize(__UpperCamelCase ) # , add_prefix_space=True)
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
__lowerCAmelCase = tokens + [tokenizer.unk_token]
__lowerCAmelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase )
def A__ ( self ) -> List[str]:
__lowerCAmelCase = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=__UpperCamelCase ) , [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=__UpperCamelCase ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
@slow
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.tokenizer_class.from_pretrained("""roberta-base""" )
__lowerCAmelCase = tokenizer.encode("""sequence builders""" , add_special_tokens=__UpperCamelCase )
__lowerCAmelCase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__UpperCamelCase )
__lowerCAmelCase = tokenizer.encode(
"""sequence builders""" , add_special_tokens=__UpperCamelCase , add_prefix_space=__UpperCamelCase )
__lowerCAmelCase = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=__UpperCamelCase , add_prefix_space=__UpperCamelCase )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__UpperCamelCase )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__UpperCamelCase , __UpperCamelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = 'Encode this sequence.'
__lowerCAmelCase = tokenizer.byte_encoder[' '.encode("""utf-8""" )[0]]
# Testing encoder arguments
__lowerCAmelCase = tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase , add_prefix_space=__UpperCamelCase )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__UpperCamelCase , __UpperCamelCase )
__lowerCAmelCase = tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase , add_prefix_space=__UpperCamelCase )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
__lowerCAmelCase = tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__UpperCamelCase , __UpperCamelCase )
# Testing spaces after special tokens
__lowerCAmelCase = '<mask>'
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase )} ) # mask token has a left space
__lowerCAmelCase = tokenizer.convert_tokens_to_ids(__UpperCamelCase )
__lowerCAmelCase = 'Encode <mask> sequence'
__lowerCAmelCase = 'Encode <mask>sequence'
__lowerCAmelCase = tokenizer.encode(__UpperCamelCase )
__lowerCAmelCase = encoded.index(__UpperCamelCase )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
__lowerCAmelCase = tokenizer.encode(__UpperCamelCase )
__lowerCAmelCase = encoded.index(__UpperCamelCase )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__UpperCamelCase , __UpperCamelCase )
def A__ ( self ) -> List[str]:
pass
def A__ ( self ) -> Any:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
__lowerCAmelCase = self.tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
__lowerCAmelCase = 'A, <mask> AllenNLP sentence.'
__lowerCAmelCase = tokenizer_r.encode_plus(__UpperCamelCase , add_special_tokens=__UpperCamelCase , return_token_type_ids=__UpperCamelCase )
__lowerCAmelCase = tokenizer_p.encode_plus(__UpperCamelCase , add_special_tokens=__UpperCamelCase , return_token_type_ids=__UpperCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
__lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
__lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
__UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def A__ ( self ) -> List[Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__UpperCamelCase , add_prefix_space=__UpperCamelCase , trim_offsets=__UpperCamelCase )
__lowerCAmelCase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__lowerCAmelCase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , __UpperCamelCase )
self.assertEqual(post_processor_state["""add_prefix_space"""] , __UpperCamelCase )
self.assertEqual(post_processor_state["""trim_offsets"""] , __UpperCamelCase )
def A__ ( self ) -> Any:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__lowerCAmelCase = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
__lowerCAmelCase = f"""{text_of_1_token} {text_of_1_token}"""
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(
__UpperCamelCase , use_fast=__UpperCamelCase , add_prefix_space=__UpperCamelCase , trim_offsets=__UpperCamelCase )
__lowerCAmelCase = tokenizer_r(__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , add_special_tokens=__UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__UpperCamelCase ) + 1, len(__UpperCamelCase ) + 1 + len(__UpperCamelCase )) , )
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(
__UpperCamelCase , use_fast=__UpperCamelCase , add_prefix_space=__UpperCamelCase , trim_offsets=__UpperCamelCase )
__lowerCAmelCase = tokenizer_r(__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , add_special_tokens=__UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__UpperCamelCase ) + 1, len(__UpperCamelCase ) + 1 + len(__UpperCamelCase )) , )
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(
__UpperCamelCase , use_fast=__UpperCamelCase , add_prefix_space=__UpperCamelCase , trim_offsets=__UpperCamelCase )
__lowerCAmelCase = tokenizer_r(__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , add_special_tokens=__UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__UpperCamelCase ), len(__UpperCamelCase ) + 1 + len(__UpperCamelCase )) , )
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(
__UpperCamelCase , use_fast=__UpperCamelCase , add_prefix_space=__UpperCamelCase , trim_offsets=__UpperCamelCase )
__lowerCAmelCase = tokenizer_r(__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , add_special_tokens=__UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__UpperCamelCase ), len(__UpperCamelCase ) + 1 + len(__UpperCamelCase )) , )
__lowerCAmelCase = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(
__UpperCamelCase , use_fast=__UpperCamelCase , add_prefix_space=__UpperCamelCase , trim_offsets=__UpperCamelCase )
__lowerCAmelCase = tokenizer_r(__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , add_special_tokens=__UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__UpperCamelCase ) + 1, 1 + len(__UpperCamelCase ) + 1 + len(__UpperCamelCase )) , )
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(
__UpperCamelCase , use_fast=__UpperCamelCase , add_prefix_space=__UpperCamelCase , trim_offsets=__UpperCamelCase )
__lowerCAmelCase = tokenizer_r(__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , add_special_tokens=__UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__UpperCamelCase ), 1 + len(__UpperCamelCase ) + 1 + len(__UpperCamelCase )) , )
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(
__UpperCamelCase , use_fast=__UpperCamelCase , add_prefix_space=__UpperCamelCase , trim_offsets=__UpperCamelCase )
__lowerCAmelCase = tokenizer_r(__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , add_special_tokens=__UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__UpperCamelCase ), 1 + len(__UpperCamelCase ) + 1 + len(__UpperCamelCase )) , )
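# Standalone check of the byte-level BPE space marker seen in the merges above:
# bytes outside GPT-2's printable set are remapped to code points from U+0100
# upward, and the space byte 0x20 works out to U+0120 ('Ġ'), the prefix on
# tokens like '\u0120lowest' (illustrative; mirrors tokenizer.byte_encoder).
_space_byte = ' '.encode('utf-8')[0]  # 0x20
assert chr(_space_byte + 0x100) == '\u0120'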
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int):
    """Applies an X (NOT) gate to qubits 0 and 1, then measures them."""
    backend = qiskit.Aer.get_backend('aer_simulator')
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the aer simulator
    job = qiskit.execute(circuit, backend, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
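# Hedged expectation for the function above (illustrative helper, not part of
# the original script): with X applied to both qubits the measurement is
# deterministic, so every one of the 1000 shots should read '11'.
def _check_deterministic_counts():
    assert single_qubit_measure(2, 2) == {'11': 1000}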
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(F"Total count for various states are: {counts}")
'''simple docstring'''
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class UpperCAmelCase__ :
def __init__(self , _a = None ) -> None:
if components is None:
lowercase_ : Dict = []
lowercase_ : Optional[int] = list(_a )
def __len__(self ) -> int:
return len(self.__components )
def __str__(self ) -> str:
return "(" + ",".join(map(_a , self.__components ) ) + ")"
def __add__(self , _a ) -> Vector:
lowercase_ : List[Any] = len(self )
if size == len(_a ):
lowercase_ : List[str] = [self.__components[i] + other.component(_a ) for i in range(_a )]
return Vector(_a )
else:
raise Exception('must have the same size' )
def __sub__(self , _a ) -> Vector:
lowercase_ : Optional[int] = len(self )
if size == len(_a ):
lowercase_ : Any = [self.__components[i] - other.component(_a ) for i in range(_a )]
return Vector(_a )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__(self , _a ) -> Vector:
...
@overload
def __mul__(self , _a ) -> float:
...
def __mul__(self , _a ) -> float | Vector:
if isinstance(_a , (float, int) ):
lowercase_ : Optional[int] = [c * other for c in self.__components]
return Vector(_a )
elif isinstance(_a , _a ) and len(self ) == len(_a ):
lowercase_ : Union[str, Any] = len(self )
lowercase_ : str = [self.__components[i] * other.component(_a ) for i in range(_a )]
return sum(_a )
else: # error case
raise Exception('invalid operand!' )
def _lowerCamelCase (self ) -> Vector:
return Vector(self.__components )
def _lowerCamelCase (self , _a ) -> float:
if isinstance(_a , _a ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def _lowerCamelCase (self , _a , _a ) -> None:
assert -len(self.__components ) <= pos < len(self.__components )
lowercase_ : Dict = value
def _lowerCamelCase (self ) -> float:
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
lowercase_ : str = [c**2 for c in self.__components]
return math.sqrt(sum(_a ) )
def _lowerCamelCase (self , _a , _a = False ) -> float:
lowercase_ : Any = self * other
lowercase_ : Union[str, Any] = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
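# Standalone check of the angle formula used in the method above,
# cos(theta) = (x . y) / (|x| * |y|), independent of the class (illustrative):
def _angle_sketch(x, y, deg=False):
    dot = sum(a * b for a, b in zip(x, y))
    den = math.sqrt(sum(a * a for a in x)) * math.sqrt(sum(b * b for b in y))
    theta = math.acos(dot / den)
    return math.degrees(theta) if deg else theta
assert abs(_angle_sketch([1, 0], [0, 1], deg=True) - 90.0) < 1e-9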
def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ ):
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return Vector([0] * dimension )
def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and (isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ))
lowercase_ : Dict = [0] * dimension
lowercase_ : Optional[Any] = 1
return Vector(SCREAMING_SNAKE_CASE_ )
def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
assert (
isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
and isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
and (isinstance(SCREAMING_SNAKE_CASE_ , (int, float) ))
)
return x * scalar + y
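# Quick standalone check of the a*x + y ("axpy") combination above, using
# plain lists instead of the Vector class (illustrative only):
def _axpy_sketch(scalar, x, y):
    return [scalar * xi + yi for xi, yi in zip(x, y)]
assert _axpy_sketch(2, [1, 2, 3], [4, 5, 6]) == [6, 9, 12]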
def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
random.seed(SCREAMING_SNAKE_CASE_ )
lowercase_ : Optional[int] = [random.randint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for _ in range(SCREAMING_SNAKE_CASE_ )]
return Vector(SCREAMING_SNAKE_CASE_ )
class UpperCAmelCase__ :
def __init__(self , _a , _a , _a ) -> None:
lowercase_ : List[Any] = matrix
lowercase_ : Optional[Any] = w
lowercase_ : List[Any] = h
def __str__(self ) -> str:
lowercase_ : Optional[Any] = ''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__(self , _a ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
lowercase_ : List[str] = []
for i in range(self.__height ):
lowercase_ : Tuple = [
self.__matrix[i][j] + other.component(_a , _a )
for j in range(self.__width )
]
matrix.append(_a )
return Matrix(_a , self.__width , self.__height )
else:
            raise Exception('matrices must have the same dimension!' )
def __sub__(self , _a ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
lowercase_ : Dict = []
for i in range(self.__height ):
lowercase_ : Tuple = [
self.__matrix[i][j] - other.component(_a , _a )
for j in range(self.__width )
]
matrix.append(_a )
return Matrix(_a , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__(self , _a ) -> Matrix:
...
@overload
def __mul__(self , _a ) -> Vector:
...
def __mul__(self , _a ) -> Vector | Matrix:
if isinstance(_a , _a ): # matrix-vector
if len(_a ) == self.__width:
lowercase_ : Optional[Any] = zero_vector(self.__height )
for i in range(self.__height ):
lowercase_ : str = [
self.__matrix[i][j] * other.component(_a )
for j in range(self.__width )
]
ans.change_component(_a , sum(_a ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(_a , (int, float) ): # matrix-scalar
lowercase_ : Union[str, Any] = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(_a , self.__width , self.__height )
return None
def _lowerCamelCase (self ) -> int:
return self.__height
def _lowerCamelCase (self ) -> int:
return self.__width
def _lowerCamelCase (self , _a , _a ) -> float:
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
            raise Exception('component: indices out of bounds' )
def _lowerCamelCase (self , _a , _a , _a ) -> None:
if 0 <= x < self.__height and 0 <= y < self.__width:
lowercase_ : Optional[Any] = value
else:
raise Exception('change_component: indices out of bounds' )
def _lowerCamelCase (self , _a , _a ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
lowercase_ : Dict = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(_a ) ):
lowercase_ : Tuple = minor[i][:y] + minor[i][y + 1 :]
return Matrix(_a , self.__width - 1 , self.__height - 1 ).determinant()
def _lowerCamelCase (self , _a , _a ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(_a , _a )
else:
raise Exception('Indices out of bounds' )
def _lowerCamelCase (self ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
lowercase_ : int = [
self.__matrix[0][y] * self.cofactor(0 , _a ) for y in range(self.__width )
]
return sum(_a )
def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ ):
lowercase_ : list[list[float]] = [[0] * n for _ in range(SCREAMING_SNAKE_CASE_ )]
return Matrix(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
random.seed(SCREAMING_SNAKE_CASE_ )
lowercase_ : list[list[float]] = [
[random.randint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for _ in range(SCREAMING_SNAKE_CASE_ )] for _ in range(SCREAMING_SNAKE_CASE_ )
]
return Matrix(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
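# Standalone sketch of the cofactor (Laplace) expansion that determinant()
# above implements, on plain nested lists (illustrative only):
def _det_sketch(m):
    if len(m) == 1:
        return m[0][0]
    return sum(
        (-1) ** col * m[0][col]
        * _det_sketch([row[:col] + row[col + 1 :] for row in m[1:]])
        for col in range(len(m))
    )
assert _det_sketch([[1, 2], [3, 4]]) == -2
assert _det_sketch([[2, 0, 0], [0, 3, 0], [0, 0, 4]]) == 24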
'''simple docstring'''
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    """Reads the given file as bytes and returns them as one long bit string."""
    result = ''
    try:
        with open(file_path, 'rb') as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f'{dat:08b}'
            result += curr_byte
        return result
    except OSError:
        print('File not accessible')
        sys.exit()
def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    """Replaces curr_string in the lexicon with curr_string + '0' and curr_string + '1'."""
    lexicon.pop(curr_string)
    lexicon[curr_string + '0'] = last_match_id
    if math.log2(index).is_integer():
        # once the id count reaches a power of two, every code needs one more bit
        for curr_key in lexicon:
            lexicon[curr_key] = '0' + lexicon[curr_key]
    lexicon[curr_string + '1'] = bin(index)[2:]
def compress_data(data_bits: str) -> str:
    """Compresses the given bit string with the Lempel-Ziv scheme used here."""
    lexicon = {'0': '0', '1': '1'}
    result, curr_string = '', ''
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ''
    while curr_string != '' and curr_string not in lexicon:
        curr_string += '0'
    if curr_string != '':
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result
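# Worked micro-example for the function above (illustrative; traced by hand):
# the first '0' is emitted as id '0' and spawns keys '00'/'01'; '00' then
# matches the widened id '00', so '001' compresses to '000'.
assert compress_data('001') == '000'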
def add_file_length(source_path: str, compressed: str) -> str:
    """Prefixes the compressed bits with the source file's length, Elias-gamma style."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return '0' * (length_length - 1) + file_length_binary + compressed
def write_file_binary(file_path: str, to_write: str) -> None:
    """Writes the given bit string to the file, padded out to whole bytes."""
    byte_length = 8
    try:
        with open(file_path, 'wb') as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append('10000000')
            else:
                result_byte_array[-1] += '1' + '0' * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder='big'))
    except OSError:
        print('File not accessible')
        sys.exit()
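# Standalone check of the padding rule used above: close the stream with a
# single '1', then zero-fill to the byte boundary (a whole '10000000' byte
# when already aligned), so the padding is unambiguous to strip (illustrative):
def _pad_to_byte_sketch(bits: str, byte_length: int = 8) -> str:
    padded = bits + '1'
    remainder = len(padded) % byte_length
    if remainder:
        padded += '0' * (byte_length - remainder)
    return padded
assert _pad_to_byte_sketch('10101') == '10101100'
assert _pad_to_byte_sketch('10101010') == '10101010' + '10000000'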
def compress(source_path: str, destination_path: str) -> None:
    """Reads the source file, compresses it, and writes the result."""
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
'169M': 12,
'430M': 24,
'1B5': 24,
'3B': 32,
'7B': 32,
'14B': 40,
}
HIDEN_SIZE_MAPPING = {
'169M': 768,
'430M': 1024,
'1B5': 2048,
'3B': 2560,
'7B': 4096,
'14B': 5120,
}
def convert_state_dict(state_dict):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = list(state_dict.keys() )
for name in state_dict_keys:
_SCREAMING_SNAKE_CASE : Tuple = state_dict.pop(SCREAMING_SNAKE_CASE__ )
# emb -> embedding
if name.startswith("""emb.""" ):
_SCREAMING_SNAKE_CASE : Any = name.replace("""emb.""" , """embeddings.""" )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith("""blocks.0.ln0""" ):
_SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""blocks.0.ln0""" , """blocks.0.pre_ln""" )
# att -> attention
_SCREAMING_SNAKE_CASE : Any = re.sub(R"""blocks\.(\d+)\.att""" , R"""blocks.\1.attention""" , SCREAMING_SNAKE_CASE__ )
# ffn -> feed_forward
_SCREAMING_SNAKE_CASE : int = re.sub(R"""blocks\.(\d+)\.ffn""" , R"""blocks.\1.feed_forward""" , SCREAMING_SNAKE_CASE__ )
# time_mix_k -> time_mix_key and reshape
if name.endswith(""".time_mix_k""" ):
_SCREAMING_SNAKE_CASE : Dict = name.replace(""".time_mix_k""" , """.time_mix_key""" )
# time_mix_v -> time_mix_value and reshape
if name.endswith(""".time_mix_v""" ):
_SCREAMING_SNAKE_CASE : List[str] = name.replace(""".time_mix_v""" , """.time_mix_value""" )
        # time_mix_r -> time_mix_receptance and reshape
if name.endswith(""".time_mix_r""" ):
_SCREAMING_SNAKE_CASE : Optional[Any] = name.replace(""".time_mix_r""" , """.time_mix_receptance""" )
if name != "head.weight":
_SCREAMING_SNAKE_CASE : Any = """rwkv.""" + name
_SCREAMING_SNAKE_CASE : Dict = weight
return state_dict
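# Quick standalone check of the attention/feed-forward regex renames above:
assert (
    re.sub(R"blocks\.(\d+)\.att", R"blocks.\1.attention", "blocks.3.att.key.weight")
    == "blocks.3.attention.key.weight"
)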
def convert_rmkv_checkpoint_to_hf_format(repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None):
"""simple docstring"""
if tokenizer_file is None:
print("""No `--tokenizer_file` provided, we will use the default tokenizer.""" )
_SCREAMING_SNAKE_CASE : Optional[int] = 5_0277
_SCREAMING_SNAKE_CASE : Dict = AutoTokenizer.from_pretrained("""EleutherAI/gpt-neox-20b""" )
else:
_SCREAMING_SNAKE_CASE : Optional[int] = PreTrainedTokenizerFast(tokenizer_file=SCREAMING_SNAKE_CASE__ )
_SCREAMING_SNAKE_CASE : List[str] = len(SCREAMING_SNAKE_CASE__ )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
# 2. Build the config
_SCREAMING_SNAKE_CASE : List[str] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
_SCREAMING_SNAKE_CASE : Union[str, Any] = candidate
break
if size is None:
raise ValueError("""Could not infer the size, please provide it with the `--size` argument.""" )
if size not in possible_sizes:
raise ValueError(f"""`size` should be one of {possible_sizes}, got {size}.""" )
_SCREAMING_SNAKE_CASE : Tuple = RwkvConfig(
vocab_size=SCREAMING_SNAKE_CASE__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(SCREAMING_SNAKE_CASE__ )
# 3. Download model file then convert state_dict
_SCREAMING_SNAKE_CASE : List[Any] = hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.load(SCREAMING_SNAKE_CASE__ , map_location="""cpu""" )
_SCREAMING_SNAKE_CASE : int = convert_state_dict(SCREAMING_SNAKE_CASE__ )
# 4. Split in shards and save
    shards, index = shard_checkpoint(SCREAMING_SNAKE_CASE__ )
for shard_file, shard in shards.items():
torch.save(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
if index is not None:
_SCREAMING_SNAKE_CASE : Any = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Save the index as well
with open(SCREAMING_SNAKE_CASE__ , """w""" , encoding="""utf-8""" ) as f:
_SCREAMING_SNAKE_CASE : List[str] = json.dumps(SCREAMING_SNAKE_CASE__ , indent=2 , sort_keys=SCREAMING_SNAKE_CASE__ ) + """\n"""
f.write(SCREAMING_SNAKE_CASE__ )
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        """Cleaning up shards. This may fail with an OOM error; if this is the case, don't worry, you still have converted the model.""" )
_SCREAMING_SNAKE_CASE : Dict = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
_SCREAMING_SNAKE_CASE : Any = torch.load(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError("""Please provide a `model_name` to push the model to the Hub.""" )
_SCREAMING_SNAKE_CASE : int = AutoModelForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE__ )
model.push_to_hub(SCREAMING_SNAKE_CASE__ , max_shard_size="""2GB""" )
tokenizer.push_to_hub(SCREAMING_SNAKE_CASE__ )
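# Illustrative shape of the shard index written above (transformers' sharded
# checkpoint convention; the size and tensor/file names below are made up):
_EXAMPLE_SHARD_INDEX = {
    "metadata": {"total_size": 2 * 1024**3},
    "weight_map": {"rwkv.embeddings.weight": "pytorch_model-00001-of-00002.bin"},
}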
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
'''simple docstring'''
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    """Recursively insertion sorts `collection` in place; call with n = len(collection)."""
    if len(collection) <= 1 or n <= 1:
        return
    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)
def insert_next(collection: list, index: int):
    """Bubbles collection[index - 1] to the right until the adjacent pair is ordered."""
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)
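# Standalone sanity check of the two functions above (illustrative):
_sample = [5, 3, 1, 4, 2]
rec_insertion_sort(_sample, len(_sample))
assert _sample == [1, 2, 3, 4, 5]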
if __name__ == "__main__":
    numbers = input('Enter integers separated by spaces: ')
    number_list = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
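# Standalone sketch of the sinusoidal position table that the
# TFRoFormerSinusoidalPositionalEmbedding tests below expect: sines fill the
# first half of the channels and cosines the second (numpy-only; the helper
# name is ours):
import numpy as np
def _sinusoidal_table_sketch(num_positions: int, dim: int):
    positions = np.arange(num_positions, dtype=np.float64)[:, None]
    inv_freq = 1.0 / (10000 ** (2 * np.arange(dim // 2) / dim))
    angles = positions * inv_freq
    return np.concatenate([np.sin(angles), np.cos(angles)], axis=1)
assert abs(_sinusoidal_table_sketch(2, 6)[1, 0] - 0.8415) < 1e-4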
class __a :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=7 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=99 , UpperCamelCase__=32 , UpperCamelCase__=2 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=16 , UpperCamelCase__=2 , UpperCamelCase__=0.02 , UpperCamelCase__=3 , UpperCamelCase__=4 , UpperCamelCase__=None , ):
SCREAMING_SNAKE_CASE_ : Optional[int] = parent
SCREAMING_SNAKE_CASE_ : int = 13
SCREAMING_SNAKE_CASE_ : Optional[int] = 7
SCREAMING_SNAKE_CASE_ : Dict = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : Dict = True
SCREAMING_SNAKE_CASE_ : Dict = True
SCREAMING_SNAKE_CASE_ : str = 99
SCREAMING_SNAKE_CASE_ : Any = 32
SCREAMING_SNAKE_CASE_ : Optional[Any] = 2
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 4
SCREAMING_SNAKE_CASE_ : Dict = 37
SCREAMING_SNAKE_CASE_ : Optional[int] = 'gelu'
SCREAMING_SNAKE_CASE_ : Dict = 0.1
SCREAMING_SNAKE_CASE_ : Tuple = 0.1
SCREAMING_SNAKE_CASE_ : Any = 512
SCREAMING_SNAKE_CASE_ : Optional[Any] = 16
SCREAMING_SNAKE_CASE_ : Dict = 2
SCREAMING_SNAKE_CASE_ : str = 0.02
SCREAMING_SNAKE_CASE_ : List[Any] = 3
SCREAMING_SNAKE_CASE_ : int = 4
SCREAMING_SNAKE_CASE_ : List[Any] = None
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_ : Optional[int] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : Dict = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
SCREAMING_SNAKE_CASE_ : List[Any] = None
SCREAMING_SNAKE_CASE_ : List[str] = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE_ : Tuple = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCamelCase__ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : str = TFRoFormerModel(config=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
SCREAMING_SNAKE_CASE_ : List[Any] = [input_ids, input_mask]
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = TFRoFormerForCausalLM(config=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(UpperCamelCase__ )['logits']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : List[str] = TFRoFormerForMaskedLM(config=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
SCREAMING_SNAKE_CASE_ : Any = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : Dict = TFRoFormerForSequenceClassification(config=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
SCREAMING_SNAKE_CASE_ : Any = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = self.num_choices
SCREAMING_SNAKE_CASE_ : List[Any] = TFRoFormerForMultipleChoice(config=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : str = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : Dict = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : int = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
SCREAMING_SNAKE_CASE_ : Any = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : int = self.num_labels
SCREAMING_SNAKE_CASE_ : Dict = TFRoFormerForTokenClassification(config=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
SCREAMING_SNAKE_CASE_ : List[Any] = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = TFRoFormerForQuestionAnswering(config=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : int = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
SCREAMING_SNAKE_CASE_ : str = model(UpperCamelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __a ( __A , __A , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ : Dict = (
{
"""feature-extraction""": TFRoFormerModel,
"""fill-mask""": TFRoFormerForMaskedLM,
"""question-answering""": TFRoFormerForQuestionAnswering,
"""text-classification""": TFRoFormerForSequenceClassification,
"""text-generation""": TFRoFormerForCausalLM,
"""token-classification""": TFRoFormerForTokenClassification,
"""zero-shot""": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ : int = False
UpperCAmelCase__ : Union[str, Any] = False
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = TFRoFormerModelTester(self )
SCREAMING_SNAKE_CASE_ : str = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def __snake_case ( self ):
self.config_tester.run_common_tests()
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ )
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*UpperCamelCase__ )
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ )
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ )
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ )
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ )
@slow
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base' )
self.assertIsNotNone(UpperCamelCase__ )
@require_tf
class __a ( unittest.TestCase ):
'''simple docstring'''
@slow
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
SCREAMING_SNAKE_CASE_ : Tuple = tf.constant([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE_ : Any = model(UpperCamelCase__ )[0]
# TODO Replace vocab size
SCREAMING_SNAKE_CASE_ : Tuple = 50000
SCREAMING_SNAKE_CASE_ : str = [1, 6, vocab_size]
self.assertEqual(output.shape , UpperCamelCase__ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
SCREAMING_SNAKE_CASE_ : str = tf.constant(
[
[
[-0.12_05_33_41, -1.0_26_49_01, 0.29_22_19_46],
[-1.5_13_37_83, 0.19_74_33, 0.15_19_06_07],
[-5.0_13_54_03, -3.90_02_56, -0.84_03_87_64],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 )
@require_tf
class __a ( unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = 1e-4
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = tf.constant([[4, 10]] )
SCREAMING_SNAKE_CASE_ : int = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
SCREAMING_SNAKE_CASE_ : Any = emba(input_ids.shape )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant(
[[0.00_00, 0.00_00, 0.00_00, 1.00_00, 1.00_00, 1.00_00], [0.84_15, 0.04_64, 0.00_22, 0.54_03, 0.99_89, 1.00_00]] )
tf.debugging.assert_near(UpperCamelCase__ , UpperCamelCase__ , atol=self.tolerance )
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = tf.constant(
[
[0.00_00, 0.00_00, 0.00_00, 0.00_00, 0.00_00],
[0.84_15, 0.82_19, 0.80_20, 0.78_19, 0.76_17],
[0.90_93, 0.93_64, 0.95_81, 0.97_49, 0.98_70],
] )
SCREAMING_SNAKE_CASE_ : str = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
SCREAMING_SNAKE_CASE_ : Tuple = emba.weight[:3, :5]
tf.debugging.assert_near(UpperCamelCase__ , UpperCamelCase__ , atol=self.tolerance )
@require_tf
class __a ( unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = 1e-4
def __snake_case ( self ):
        # query/key tensors of shape (batch=2, heads=12, seq_len=16, head_dim=64)
SCREAMING_SNAKE_CASE_ : Dict = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
SCREAMING_SNAKE_CASE_ : Optional[int] = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
SCREAMING_SNAKE_CASE_ : int = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
SCREAMING_SNAKE_CASE_ : int = embed_positions([2, 16, 768] )[None, None, :, :]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = tf.constant(
[
[0.00_00, 0.01_00, 0.02_00, 0.03_00, 0.04_00, 0.05_00, 0.06_00, 0.07_00],
[-0.20_12, 0.88_97, 0.02_63, 0.94_01, 0.20_74, 0.94_63, 0.34_81, 0.93_43],
[-1.70_57, 0.62_71, -1.21_45, 1.38_97, -0.63_03, 1.76_47, -0.11_73, 1.89_85],
[-2.17_31, -1.63_97, -2.73_58, 0.28_54, -2.18_40, 1.71_83, -1.30_18, 2.48_71],
[0.27_17, -3.61_73, -2.92_06, -2.19_88, -3.66_38, 0.38_58, -2.91_55, 2.29_80],
[3.98_59, -2.15_80, -0.79_84, -4.49_04, -4.11_81, -2.02_52, -4.47_82, 1.12_53],
] )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant(
[
[0.00_00, -0.01_00, -0.02_00, -0.03_00, -0.04_00, -0.05_00, -0.06_00, -0.07_00],
[0.20_12, -0.88_97, -0.02_63, -0.94_01, -0.20_74, -0.94_63, -0.34_81, -0.93_43],
[1.70_57, -0.62_71, 1.21_45, -1.38_97, 0.63_03, -1.76_47, 0.11_73, -1.89_85],
[2.17_31, 1.63_97, 2.73_58, -0.28_54, 2.18_40, -1.71_83, 1.30_18, -2.48_71],
[-0.27_17, 3.61_73, 2.92_06, 2.19_88, 3.66_38, -0.38_58, 2.91_55, -2.29_80],
[-3.98_59, 2.15_80, 0.79_84, 4.49_04, 4.11_81, 2.02_52, 4.47_82, -1.12_53],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , UpperCamelCase__ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , UpperCamelCase__ , atol=self.tolerance ) | 97 |
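The rotary tests above exercise two pieces: a sinusoidal position table whose first half is sine and second half cosine, and a pairwise channel rotation applied to query/key tensors. Below is a minimal NumPy sketch of that mechanism for illustration; the helper names are ours and this is not the library's implementation, though it reproduces the table values asserted above (row 1 of a 6-dim table is [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]).

# Minimal NumPy sketch of rotary position embeddings (RoPE); illustrative only.
import numpy as np

def sinusoidal_positions(num_positions: int, dim: int) -> np.ndarray:
    """Position table laid out as [sin | cos] halves, RoFormer-style."""
    inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
    angles = np.arange(num_positions)[:, None] * inv_freq[None, :]
    return np.concatenate([np.sin(angles), np.cos(angles)], axis=-1)

def apply_rotary(x: np.ndarray, table: np.ndarray) -> np.ndarray:
    """Rotate each (even, odd) channel pair of x by its position's angle.
    x and table are both (seq_len, dim)."""
    sin, cos = np.split(table, 2, axis=-1)
    sin = np.repeat(sin, 2, axis=-1)  # duplicate so each channel pair shares an angle
    cos = np.repeat(cos, 2, axis=-1)
    # (-x1, x0, -x3, x2, ...): 90-degree rotation of every channel pair
    x_rot = np.stack([-x[..., 1::2], x[..., 0::2]], axis=-1).reshape(x.shape)
    return x * cos + x_rot * sin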
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class __a ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ : List[str] = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
SCREAMING_SNAKE_CASE_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
SCREAMING_SNAKE_CASE_ : Tuple = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
SCREAMING_SNAKE_CASE_ : str = os.path.join(self.tmpdirname , UpperCamelCase__ )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self , **UpperCamelCase__ ):
return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __snake_case ( self , **UpperCamelCase__ ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __snake_case ( self , **UpperCamelCase__ ):
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __snake_case ( self ):
shutil.rmtree(self.tmpdirname )
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Dict = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ : Dict = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Optional[int] = AlignProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = AlignProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase__ )
self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCamelCase__ )
self.assertIsInstance(processor_fast.image_processor , UpperCamelCase__ )
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
SCREAMING_SNAKE_CASE_ : int = self.get_image_processor(do_normalize=UpperCamelCase__ , padding_value=1.0 )
SCREAMING_SNAKE_CASE_ : List[Any] = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCamelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase__ )
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : str = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : str = AlignProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : int = image_processor(UpperCamelCase__ , return_tensors='np' )
SCREAMING_SNAKE_CASE_ : Dict = processor(images=UpperCamelCase__ , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : List[str] = AlignProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = 'lower newer'
SCREAMING_SNAKE_CASE_ : Tuple = processor(text=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : int = tokenizer(UpperCamelCase__ , padding='max_length' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : str = AlignProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : str = 'lower newer'
SCREAMING_SNAKE_CASE_ : Optional[int] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : Optional[int] = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase__ ):
processor()
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[str] = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : List[str] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AlignProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE_ : List[str] = processor.batch_decode(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Any = tokenizer.batch_decode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : int = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AlignProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : str = 'lower newer'
SCREAMING_SNAKE_CASE_ : List[str] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : List[Any] = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) | 97 | 1 |
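For reference, a short usage sketch of the processor pattern these tests exercise: one wrapper routes text to the tokenizer and images to the image processor, so both modalities come back in a single encoding. The checkpoint name below is an assumption; the returned keys match the ones asserted in the test above.

import numpy as np
from PIL import Image
from transformers import AlignProcessor

processor = AlignProcessor.from_pretrained('kakaobrain/align-base')  # assumed checkpoint
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(text='a photo of a cat', images=image, return_tensors='pt')
print(sorted(inputs.keys()))  # attention_mask, input_ids, pixel_values, token_type_ids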
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[0]
    # If you're using some other dataset, select the target column here
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64))  # input shape is inferred from the previous layer
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    predictions = model.predict(x_test)
| 610 |
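The script above builds its supervised windows inline; a reusable helper doing the same slicing might look like this (a sketch, not part of the original script):

import numpy as np

def make_windows(series: np.ndarray, look_back: int, forward_days: int):
    """Turn a (n, 1) series into (samples, look_back, 1) inputs and
    (samples, forward_days) targets using a sliding window."""
    xs, ys = [], []
    for i in range(len(series) - look_back - forward_days + 1):
        xs.append(series[i : i + look_back])
        ys.append(series[i + look_back : i + look_back + forward_days].ravel())
    return np.array(xs), np.array(ys)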
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_pix2struct': [
'PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Pix2StructConfig',
'Pix2StructTextConfig',
'Pix2StructVisionConfig',
],
'processing_pix2struct': ['Pix2StructProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_pix2struct'] = ['Pix2StructImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_pix2struct'] = [
'PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Pix2StructPreTrainedModel',
'Pix2StructForConditionalGeneration',
'Pix2StructVisionModel',
'Pix2StructTextModel',
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 179 | 0 |
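The module above defers its heavy imports through transformers' _LazyModule, so submodules load only on first attribute access. A self-contained sketch of the same deferred-import idea using a PEP 562 module-level __getattr__ (names illustrative):

import importlib

_import_structure = {'configuration_pix2struct': ['Pix2StructConfig']}
_attr_to_module = {
    attr: module for module, attrs in _import_structure.items() for attr in attrs
}

def __getattr__(name):
    # resolve the attribute lazily on first access, then delegate to the submodule
    if name in _attr_to_module:
        module = importlib.import_module('.' + _attr_to_module[name], __name__)
        return getattr(module, name)
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')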
import doctest
from collections import deque
import numpy as np
class lowerCAmelCase_ :
"""simple docstring"""
    def __init__(self ) -> None:
        """Two sample signals to convolve circularly."""
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def __magic_name__ (self ) -> list[float]:
        """Compute the circular convolution of the two signals via a
        circulant matrix built from rotations of the second signal."""
        length_first_signal = len(self.first_signal )
        length_second_signal = len(self.second_signal )
        max_length = max(length_first_signal , length_second_signal )
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length )]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length ):
            rotated_signal = deque(self.second_signal )
            rotated_signal.rotate(i )
            for j, item in enumerate(rotated_signal ):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix ) , np.transpose(self.first_signal ) )
        # rounding-off to two decimal places
        return [round(i , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
| 700 |
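As a sanity check on the class above: circular convolution can also be computed in the frequency domain, and for the default signals both routes give [10.0, 10.0, 6.0, 14.0]:

import numpy as np

a = np.array([2, 1, 2, -1], dtype=float)
b = np.array([1, 2, 3, 4], dtype=float)
via_fft = np.real(np.fft.ifft(np.fft.fft(a) * np.fft.fft(b)))
print(np.round(via_fft, 2))  # [10. 10.  6. 14.]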
"""simple docstring"""
from manim import *
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
def __magic_name__ (self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = Rectangle(height=0.5 , width=0.5 )
SCREAMING_SNAKE_CASE__ : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = Rectangle(height=0.25 , width=0.25 )
SCREAMING_SNAKE_CASE__ : Any = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE__ : int = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE__ : Any = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
SCREAMING_SNAKE_CASE__ : Any = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = VGroup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
SCREAMING_SNAKE_CASE__ : List[str] = Text("""CPU""" , font_size=24 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Group(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = [mem.copy() for i in range(4 )]
SCREAMING_SNAKE_CASE__ : List[Any] = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
SCREAMING_SNAKE_CASE__ : Any = Text("""GPU""" , font_size=24 )
SCREAMING_SNAKE_CASE__ : int = Group(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE__ )
gpu.move_to([-1, -1, 0] )
self.add(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE__ : Optional[Any] = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
SCREAMING_SNAKE_CASE__ : Tuple = Text("""Model""" , font_size=24 )
SCREAMING_SNAKE_CASE__ : Dict = Group(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE__ )
model.move_to([3, -1.0, 0] )
self.add(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
SCREAMING_SNAKE_CASE__ : Any = []
for i, rect in enumerate(SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : List[str] = fill.copy().set_fill(SCREAMING_SNAKE_CASE__ , opacity=0.8 )
target.move_to(SCREAMING_SNAKE_CASE__ )
model_arr.append(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(SCREAMING_SNAKE_CASE__ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(SCREAMING_SNAKE_CASE__ )
self.add(*SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Tuple = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE__ : List[str] = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE__ : Any = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
SCREAMING_SNAKE_CASE__ : int = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = VGroup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Text("""Disk""" , font_size=24 )
SCREAMING_SNAKE_CASE__ : Any = Group(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE__ )
disk.move_to([-4, -1.25, 0] )
self.add(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE__ : List[str] = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(SCREAMING_SNAKE_CASE__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = MarkupText(
F'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(SCREAMING_SNAKE_CASE__ ) )
SCREAMING_SNAKE_CASE__ : Dict = Square(0.3 )
input.set_fill(SCREAMING_SNAKE_CASE__ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , SCREAMING_SNAKE_CASE__ , buff=0.5 )
self.play(Write(SCREAMING_SNAKE_CASE__ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=SCREAMING_SNAKE_CASE__ , buff=0.02 )
self.play(MoveToTarget(SCREAMING_SNAKE_CASE__ ) )
self.play(FadeOut(SCREAMING_SNAKE_CASE__ ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = Arrow(start=SCREAMING_SNAKE_CASE__ , end=SCREAMING_SNAKE_CASE__ , color=SCREAMING_SNAKE_CASE__ , buff=0.5 )
a.next_to(model_arr[0].get_left() , SCREAMING_SNAKE_CASE__ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
SCREAMING_SNAKE_CASE__ : Tuple = MarkupText(
F'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(SCREAMING_SNAKE_CASE__ , run_time=3 ) )
SCREAMING_SNAKE_CASE__ : Tuple = {"""run_time""": 1, """fade_in""": True, """fade_out""": True, """buff""": 0.02}
self.play(
Write(SCREAMING_SNAKE_CASE__ ) , Circumscribe(model_arr[0] , color=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) , Circumscribe(model_cpu_arr[0] , color=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) , Circumscribe(gpu_rect[0] , color=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , SCREAMING_SNAKE_CASE__ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = AnimationGroup(
FadeOut(SCREAMING_SNAKE_CASE__ , run_time=0.5 ) , MoveToTarget(SCREAMING_SNAKE_CASE__ , run_time=0.5 ) , FadeIn(SCREAMING_SNAKE_CASE__ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(SCREAMING_SNAKE_CASE__ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0.7
self.play(
Circumscribe(model_arr[i] , **SCREAMING_SNAKE_CASE__ ) , Circumscribe(cpu_left_col_base[i] , **SCREAMING_SNAKE_CASE__ ) , Circumscribe(cpu_left_col_base[i + 1] , color=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) , Circumscribe(gpu_rect[0] , color=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) , Circumscribe(model_arr[i + 1] , color=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) , Circumscribe(cpu_left_col_base[-1] , color=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) , Circumscribe(gpu_rect[0] , color=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = a_c
SCREAMING_SNAKE_CASE__ : Optional[Any] = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(SCREAMING_SNAKE_CASE__ ) , FadeOut(SCREAMING_SNAKE_CASE__ , run_time=0.5 ) , )
SCREAMING_SNAKE_CASE__ : Dict = MarkupText(F'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(SCREAMING_SNAKE_CASE__ , run_time=3 ) , MoveToTarget(SCREAMING_SNAKE_CASE__ ) )
self.wait()
| 545 | 0 |
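The animation above depicts hook-based CPU offload: each layer's weights stay on the CPU and are moved to the GPU just before that layer's forward pass, then evicted again. A minimal PyTorch sketch of that idea (illustrative only, not accelerate's actual implementation; assumes a CUDA device is available):

import torch
from torch import nn

def attach_offload_hooks(model: nn.Module, device: str = 'cuda'):
    """Keep each top-level child on CPU; swap it onto `device` only for
    the duration of its own forward pass."""
    def load(module, args):
        module.to(device)   # bring weights in just-in-time

    def evict(module, args, output):
        module.to('cpu')    # release GPU memory right after use

    for layer in model.children():
        layer.to('cpu')
        layer.register_forward_pre_hook(load)
        layer.register_forward_hook(evict)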
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def __init__( self : Any ,lowercase__ : Tuple ,lowercase__ : bool = True ,lowercase__ : Dict[str, int] = None ,lowercase__ : int = 3_2 ,lowercase__ : bool = True ,lowercase__ : Union[int, float] = 1 / 2_5_5 ,lowercase__ : bool = True ,lowercase__ : bool = True ,lowercase__ : Optional[Union[float, List[float]]] = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] ,lowercase__ : Optional[Union[float, List[float]]] = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] ,lowercase__ : bool = True ,lowercase__ : Any=7 ,lowercase__ : Optional[int]=3_0 ,lowercase__ : Tuple=4_0_0 ,lowercase__ : List[Any]=3 ,):
__lowercase = parent
__lowercase = do_resize
__lowercase = size if size is not None else {'''shortest_edge''': 2_8_8}
__lowercase = size_divisor
__lowercase = do_rescale
__lowercase = rescale_factor
__lowercase = do_normalize
__lowercase = do_center_crop
__lowercase = image_mean
__lowercase = image_std
__lowercase = do_pad
__lowercase = batch_size
__lowercase = num_channels
__lowercase = min_resolution
__lowercase = max_resolution
def SCREAMING_SNAKE_CASE ( self : List[str] ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[int]=False ):
if not batched:
__lowercase = self.size['''shortest_edge''']
__lowercase = image_inputs[0]
if isinstance(lowercase__ ,Image.Image ):
__lowercase , __lowercase = image.size
else:
__lowercase , __lowercase = image.shape[1], image.shape[2]
__lowercase = size / min(lowercase__ ,lowercase__ )
if h < w:
__lowercase , __lowercase = size, scale * w
else:
__lowercase , __lowercase = scale * h, size
__lowercase = int((1_3_3_3 / 8_0_0) * size )
if max(lowercase__ ,lowercase__ ) > max_size:
__lowercase = max_size / max(lowercase__ ,lowercase__ )
__lowercase = newh * scale
__lowercase = neww * scale
__lowercase , __lowercase = int(newh + 0.5 ), int(neww + 0.5 )
__lowercase , __lowercase = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
__lowercase = []
for image in image_inputs:
__lowercase , __lowercase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__lowercase = max(lowercase__ ,key=lambda lowercase__ : item[0] )[0]
__lowercase = max(lowercase__ ,key=lambda lowercase__ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = BridgeTowerImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = BridgeTowerImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE ( self : str ):
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase__ ,'''image_mean''' ) )
self.assertTrue(hasattr(lowercase__ ,'''image_std''' ) )
self.assertTrue(hasattr(lowercase__ ,'''do_normalize''' ) )
self.assertTrue(hasattr(lowercase__ ,'''do_resize''' ) )
self.assertTrue(hasattr(lowercase__ ,'''size''' ) )
self.assertTrue(hasattr(lowercase__ ,'''size_divisor''' ) )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
pass
def SCREAMING_SNAKE_CASE ( self : List[str] ):
# Initialize image processor
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowercase__ )
for image in image_inputs:
self.assertIsInstance(lowercase__ ,Image.Image )
# Test not batched input
__lowercase = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
__lowercase , __lowercase = self.image_processor_tester.get_expected_values(lowercase__ )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
__lowercase = image_processing(lowercase__ ,return_tensors='''pt''' ).pixel_values
__lowercase , __lowercase = self.image_processor_tester.get_expected_values(lowercase__ ,batched=lowercase__ )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def SCREAMING_SNAKE_CASE ( self : Dict ):
# Initialize image processor
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowercase__ ,numpify=lowercase__ )
for image in image_inputs:
self.assertIsInstance(lowercase__ ,np.ndarray )
# Test not batched input
__lowercase = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
__lowercase , __lowercase = self.image_processor_tester.get_expected_values(lowercase__ )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
__lowercase = image_processing(lowercase__ ,return_tensors='''pt''' ).pixel_values
__lowercase , __lowercase = self.image_processor_tester.get_expected_values(lowercase__ ,batched=lowercase__ )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
# Initialize image processor
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowercase__ ,torchify=lowercase__ )
for image in image_inputs:
self.assertIsInstance(lowercase__ ,torch.Tensor )
# Test not batched input
__lowercase = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
__lowercase , __lowercase = self.image_processor_tester.get_expected_values(lowercase__ )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
__lowercase = image_processing(lowercase__ ,return_tensors='''pt''' ).pixel_values
__lowercase , __lowercase = self.image_processor_tester.get_expected_values(lowercase__ ,batched=lowercase__ )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
| 41 |
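The tester above reimplements BridgeTower's resize rule: scale the shorter side to `size`, cap the longer side at 1333/800 * size, then snap both sides down to a multiple of `size_divisor`. Isolated, that arithmetic looks like this (a sketch using the test's defaults):

def expected_resize(h: int, w: int, size: int = 288, size_divisor: int = 32):
    scale = size / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    max_size = int((1333 / 800) * size)
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    newh, neww = int(newh + 0.5), int(neww + 0.5)
    return newh // size_divisor * size_divisor, neww // size_divisor * size_divisor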
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : List[str] = {'vocab_file': 'spiece.model'}
lowerCAmelCase : Optional[int] = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
}
}
# TODO(PVP) - this should be removed in Transformers v5
lowerCAmelCase : Optional[int] = {
't5-small': 5_12,
't5-base': 5_12,
't5-large': 5_12,
't5-3b': 5_12,
't5-11b': 5_12,
}
lowerCAmelCase : Optional[int] = '▁'
class _A ( __magic_name__):
SCREAMING_SNAKE_CASE : Tuple = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : Tuple = ['''input_ids''', '''attention_mask''']
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE=100 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE=True , **_SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
if extra_ids > 0 and additional_special_tokens is None:
SCREAMING_SNAKE_CASE_ : List[str] = [f"<extra_id_{i}>" for i in range(_SCREAMING_SNAKE_CASE )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
SCREAMING_SNAKE_CASE_ : Dict = len(set(filter(lambda _SCREAMING_SNAKE_CASE : bool('extra_id' in str(_SCREAMING_SNAKE_CASE ) ) , _SCREAMING_SNAKE_CASE ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
' tokens' )
if legacy:
logger.warning_once(
f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
' read the related pull request available at https://github.com/huggingface/transformers/pull/24565' )
SCREAMING_SNAKE_CASE_ : int = legacy
SCREAMING_SNAKE_CASE_ : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , extra_ids=_SCREAMING_SNAKE_CASE , additional_special_tokens=_SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , legacy=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
SCREAMING_SNAKE_CASE_ : Any = vocab_file
SCREAMING_SNAKE_CASE_ : Optional[Any] = extra_ids
SCREAMING_SNAKE_CASE_ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_SCREAMING_SNAKE_CASE )
@staticmethod
    def UpperCAmelCase ( pretrained_model_name_or_path , max_model_length , init_max_model_length ):
        """Work around checkpoints that ship a deprecated max model length."""
        if pretrained_model_name_or_path in _A.max_model_input_sizes:
            SCREAMING_SNAKE_CASE_ : int = deprecated_max_model_length = _A.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'This tokenizer was incorrectly instantiated with a model max length of'
f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
f" {pretrained_model_name_or_path} automatically truncating your input to"
f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
' instantiate this tokenizer with `model_max_length` set to your preferred value.' , _SCREAMING_SNAKE_CASE , )
return max_model_length
@property
def UpperCAmelCase ( self ):
"""simple docstring"""
return self.sp_model.get_piece_size() + self._extra_ids
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = {self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_SCREAMING_SNAKE_CASE , token_ids_a=_SCREAMING_SNAKE_CASE , already_has_special_tokens=_SCREAMING_SNAKE_CASE )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
return ([0] * len(_SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
def UpperCAmelCase ( self ):
"""simple docstring"""
return list(
set(filter(lambda _SCREAMING_SNAKE_CASE : bool(re.search(r'<extra_id_\d+>' , _SCREAMING_SNAKE_CASE ) ) is not None , self.additional_special_tokens ) ) )
def UpperCAmelCase ( self ):
"""simple docstring"""
return [self._convert_token_to_id(_SCREAMING_SNAKE_CASE ) for token in self.get_sentinel_tokens()]
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if len(_SCREAMING_SNAKE_CASE ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
' eos tokens being added.' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self._add_eos_if_not_present(_SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return token_ids_a
else:
SCREAMING_SNAKE_CASE_ : int = self._add_eos_if_not_present(_SCREAMING_SNAKE_CASE )
return token_ids_a + token_ids_a
def __getstate__( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.__dict__.copy()
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
return state
def __setstate__( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
SCREAMING_SNAKE_CASE_ : str = {}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if not self.legacy:
SCREAMING_SNAKE_CASE_ : Dict = SPIECE_UNDERLINE + text.replace(_SCREAMING_SNAKE_CASE , ' ' )
return super().tokenize(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if not self.legacy:
SCREAMING_SNAKE_CASE_ : List[str] = text.startswith(_SCREAMING_SNAKE_CASE )
if is_first:
SCREAMING_SNAKE_CASE_ : Optional[int] = text[1:]
SCREAMING_SNAKE_CASE_ : Tuple = self.sp_model.encode(_SCREAMING_SNAKE_CASE , out_type=_SCREAMING_SNAKE_CASE )
if not self.legacy and not is_first and not text.startswith(' ' ) and tokens[0].startswith(_SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ : Any = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if token.startswith('<extra_id_' ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = re.match(r'<extra_id_(\d+)>' , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Dict = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if index < self.sp_model.get_piece_size():
SCREAMING_SNAKE_CASE_ : str = self.sp_model.IdToPiece(_SCREAMING_SNAKE_CASE )
else:
SCREAMING_SNAKE_CASE_ : List[Any] = f"<extra_id_{self.vocab_size - 1 - index}>"
return token
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = []
SCREAMING_SNAKE_CASE_ : str = ''
SCREAMING_SNAKE_CASE_ : Dict = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_SCREAMING_SNAKE_CASE ) + token
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Dict = []
else:
current_sub_tokens.append(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Dict = False
out_string += self.sp_model.decode(_SCREAMING_SNAKE_CASE )
return out_string.strip()
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE_ : Dict = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(_SCREAMING_SNAKE_CASE , 'wb' ) as fi:
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(_SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 511 | 0 |
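One detail worth highlighting in the tokenizer above: the <extra_id_K> sentinel tokens sit at the top of the vocabulary, so <extra_id_0> receives the largest id (vocab_size - 1) and <extra_id_99> the smallest of the block. A tiny stand-alone check of that mapping (32100 is the usual T5 vocabulary size with 100 extra ids, assumed here):

vocab_size = 32100

def sentinel_to_id(k: int) -> int:
    return vocab_size - k - 1  # mirrors the <extra_id_K> branch above

assert sentinel_to_id(0) == 32099
assert sentinel_to_id(99) == 32000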
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class __magic_name__ :
def __init__( self : Optional[int] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : str=None , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : Optional[int]=None , lowerCAmelCase__ : Union[str, Any]="resnet50" , lowerCAmelCase__ : Union[str, Any]=3 , lowerCAmelCase__ : Any=3_2 , lowerCAmelCase__ : str=3 , lowerCAmelCase__ : Dict=True , lowerCAmelCase__ : Dict=True , ) -> List[str]:
UpperCAmelCase = parent
UpperCAmelCase = out_indices if out_indices is not None else [4]
UpperCAmelCase = stage_names
UpperCAmelCase = out_features
UpperCAmelCase = backbone
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = num_channels
UpperCAmelCase = use_pretrained_backbone
UpperCAmelCase = is_training
def _UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = self.get_config()
return config, pixel_values
def _UpperCamelCase ( self : Tuple ) -> str:
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def _UpperCamelCase ( self : Dict , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Any ) -> Any:
UpperCAmelCase = TimmBackbone(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 1_4, 1_4) , )
def _UpperCamelCase ( self : List[str] ) -> int:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class __magic_name__ ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
UpperCAmelCase = (TimmBackbone,) if is_torch_available() else ()
UpperCAmelCase = {"""feature-extraction""": TimmBackbone} if is_torch_available() else {}
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def _UpperCamelCase ( self : List[str] ) -> Any:
UpperCAmelCase = TimmBackboneModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )
def _UpperCamelCase ( self : Any ) -> Optional[int]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCamelCase ( self : List[Any] ) -> List[Any]:
UpperCAmelCase = "resnet18"
UpperCAmelCase = "microsoft/resnet-18"
UpperCAmelCase = AutoBackbone.from_pretrained(lowerCAmelCase__ , use_timm_backbone=lowerCAmelCase__ )
UpperCAmelCase = AutoBackbone.from_pretrained(lowerCAmelCase__ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
UpperCAmelCase = AutoBackbone.from_pretrained(lowerCAmelCase__ , use_timm_backbone=lowerCAmelCase__ , out_indices=[1, 2, 3] )
UpperCAmelCase = AutoBackbone.from_pretrained(lowerCAmelCase__ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("TimmBackbone doesn't support feed forward chunking" )
def _UpperCamelCase ( self : Dict ) -> Optional[int]:
pass
@unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute" )
def _UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
pass
@unittest.skip("TimmBackbone initialization is managed on the timm side" )
def _UpperCamelCase ( self : Tuple ) -> Optional[Any]:
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def _UpperCamelCase ( self : Dict ) -> Optional[int]:
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def _UpperCamelCase ( self : Dict ) -> str:
pass
@unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint" )
def _UpperCamelCase ( self : List[str] ) -> Any:
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def _UpperCamelCase ( self : Optional[int] ) -> int:
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def _UpperCamelCase ( self : List[str] ) -> List[str]:
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def _UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def _UpperCamelCase ( self : int ) -> List[Any]:
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def _UpperCamelCase ( self : List[Any] ) -> Optional[int]:
pass
@unittest.skip("TimmBackbone doesn't have hidden size info in its configuration." )
def _UpperCamelCase ( self : Optional[Any] ) -> Tuple:
pass
@unittest.skip("TimmBackbone doesn't support output_attentions." )
def _UpperCamelCase ( self : int ) -> Tuple:
pass
@unittest.skip("Safetensors is not supported by timm." )
def _UpperCamelCase ( self : int ) -> List[Any]:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _UpperCamelCase ( self : int ) -> Tuple:
pass
def _UpperCamelCase ( self : Dict ) -> Any:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase__ )
UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def _UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = True
UpperCAmelCase = self.has_attentions
# no need to test all models as different heads yield the same functionality
UpperCAmelCase = self.all_model_classes[0]
UpperCAmelCase = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
UpperCAmelCase = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase = model(**lowerCAmelCase__ )
UpperCAmelCase = outputs[0][-1]
# Encoder-/Decoder-only models
UpperCAmelCase = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
UpperCAmelCase = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=lowerCAmelCase__ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def _UpperCamelCase ( self : Optional[int] ) -> List[str]:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
UpperCAmelCase = model(**lowerCAmelCase__ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
UpperCAmelCase = copy.deepcopy(lowerCAmelCase__ )
UpperCAmelCase = None
UpperCAmelCase = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
UpperCAmelCase = model(**lowerCAmelCase__ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
UpperCAmelCase = copy.deepcopy(lowerCAmelCase__ )
UpperCAmelCase = False
UpperCAmelCase = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
UpperCAmelCase = model(**lowerCAmelCase__ )
| 703 |
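A usage sketch of what the tests above verify: the same ResNet can be loaded as a timm backbone, with out_indices selecting which stages' feature maps are returned (checkpoint name taken from the test; shapes depend on the input size):

import torch
from transformers import AutoBackbone

backbone = AutoBackbone.from_pretrained('resnet18', use_timm_backbone=True, out_indices=[1, 2, 3])
pixel_values = torch.randn(1, 3, 224, 224)
outputs = backbone(pixel_values)
for fmap, channels in zip(outputs.feature_maps, backbone.channels):
    print(fmap.shape, channels)  # one feature map per requested stage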
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class __magic_name__ ( _snake_case ):
UpperCAmelCase = """lxmert"""
UpperCAmelCase = {}
def __init__( self : int , lowerCAmelCase__ : Any=3_0_5_2_2 , lowerCAmelCase__ : List[str]=7_6_8 , lowerCAmelCase__ : Union[str, Any]=1_2 , lowerCAmelCase__ : List[Any]=9_5_0_0 , lowerCAmelCase__ : Any=1_6_0_0 , lowerCAmelCase__ : Union[str, Any]=4_0_0 , lowerCAmelCase__ : Tuple=3_0_7_2 , lowerCAmelCase__ : Dict="gelu" , lowerCAmelCase__ : Tuple=0.1 , lowerCAmelCase__ : Tuple=0.1 , lowerCAmelCase__ : int=5_1_2 , lowerCAmelCase__ : List[str]=2 , lowerCAmelCase__ : List[str]=0.02 , lowerCAmelCase__ : str=1e-1_2 , lowerCAmelCase__ : str=9 , lowerCAmelCase__ : int=5 , lowerCAmelCase__ : Optional[int]=5 , lowerCAmelCase__ : List[Any]=2_0_4_8 , lowerCAmelCase__ : Any=4 , lowerCAmelCase__ : Dict=6.67 , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : Union[str, Any]=True , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : Tuple=True , **lowerCAmelCase__ : List[Any] , ) -> Dict:
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = num_qa_labels
UpperCAmelCase = num_object_labels
UpperCAmelCase = num_attr_labels
UpperCAmelCase = l_layers
UpperCAmelCase = x_layers
UpperCAmelCase = r_layers
UpperCAmelCase = visual_feat_dim
UpperCAmelCase = visual_pos_dim
UpperCAmelCase = visual_loss_normalizer
UpperCAmelCase = task_matched
UpperCAmelCase = task_mask_lm
UpperCAmelCase = task_obj_predict
UpperCAmelCase = task_qa
UpperCAmelCase = visual_obj_loss
UpperCAmelCase = visual_attr_loss
UpperCAmelCase = visual_feat_loss
UpperCAmelCase = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
super().__init__(**lowerCAmelCase__ )
| 1 | 0 |
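A quick instantiation sketch for the config above: LXMERT keeps separate depths for its language, cross-modal and vision stacks, and (as the last assignment in __init__ shows) stores them together as one dict. The attribute name follows the upstream library and is an assumption here:

from transformers import LxmertConfig

config = LxmertConfig(l_layers=9, x_layers=5, r_layers=5)  # the defaults above
print(config.num_hidden_layers)  # {'vision': 5, 'cross_encoder': 5, 'language': 9}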
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Element-wise logistic sigmoid: 1 / (1 + e^-x)."""
    return 1 / (1 + np.exp(-vector))
def swish(vector: np.ndarray) -> np.ndarray:
    """Swish (SiLU) activation: x * sigmoid(x)."""
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod() | 552 |
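A quick numeric check of the two activations above (assuming the sigmoid/swish definitions are in scope): sigmoid(0) = 0.5, so swish(0) = 0, and swish(x) approaches x for large positive x.

import numpy as np

x = np.array([-1.0, 0.0, 1.0, 5.0])
print(sigmoid(x))  # [0.26894142 0.5        0.73105858 0.99330715]
print(swish(x))    # [-0.26894142  0.          0.73105858  4.96653575]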
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __a ( __snake_case , __snake_case , __snake_case ):
    lowerCamelCase : Optional[int] = [R'h\.\d+\.attn\.bias', R'h\.\d+\.attn\.masked_bias']
@register_to_config
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = 5_0257 , UpperCAmelCase = 1024 , UpperCAmelCase = 768 , UpperCAmelCase = 12 , UpperCAmelCase = 12 , UpperCAmelCase = None , UpperCAmelCase = "gelu_new" , UpperCAmelCase = 0.1 , UpperCAmelCase = 0.1 , UpperCAmelCase = 0.1 , UpperCAmelCase = 1E-5 , UpperCAmelCase = 0.0_2 , UpperCAmelCase = True , UpperCAmelCase = True , UpperCAmelCase = False , UpperCAmelCase = False , ):
'''simple docstring'''
super().__init__()
lowerCAmelCase_ = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
F"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
F""" `n_embd`: {n_embd} are not equal.""" )
lowerCAmelCase_ = prefix_inner_dim
lowerCAmelCase_ = prefix_hidden_dim
lowerCAmelCase_ = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
lowerCAmelCase_ = (
nn.Linear(self.prefix_hidden_dim , UpperCAmelCase ) if self.prefix_hidden_dim is not None else nn.Identity()
)
lowerCAmelCase_ = GPTaConfig(
vocab_size=UpperCAmelCase , n_positions=UpperCAmelCase , n_embd=UpperCAmelCase , n_layer=UpperCAmelCase , n_head=UpperCAmelCase , n_inner=UpperCAmelCase , activation_function=UpperCAmelCase , resid_pdrop=UpperCAmelCase , embd_pdrop=UpperCAmelCase , attn_pdrop=UpperCAmelCase , layer_norm_epsilon=UpperCAmelCase , initializer_range=UpperCAmelCase , scale_attn_weights=UpperCAmelCase , use_cache=UpperCAmelCase , scale_attn_by_inverse_layer_idx=UpperCAmelCase , reorder_and_upcast_attn=UpperCAmelCase , )
lowerCAmelCase_ = GPTaLMHeadModel(UpperCAmelCase )
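    # Note (added): `encode_prefix` / `decode_prefix` map a CLIP feature of width
    # `prefix_inner_dim` into the GPT-2 embedding width `n_embd` (optionally via a
    # `prefix_hidden_dim` bottleneck); the result is prepended to the token
    # embeddings in `forward` below.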
    def forward( self , input_ids , prefix_embeds , attention_mask = None , labels = None , ):
        '''simple docstring'''
        embedding_text = self.transformer.transformer.wte(input_ids )
        hidden = self.encode_prefix(prefix_embeds )
        prefix_embeds = self.decode_prefix(hidden )
        embedding_cat = torch.cat((prefix_embeds, embedding_text) , dim=1 )
        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
            labels = torch.cat((dummy_token, input_ids) , dim=1 )
        out = self.transformer(inputs_embeds=embedding_cat , labels=labels , attention_mask=attention_mask )
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out
    def get_dummy_token( self , batch_size , device ):
        '''simple docstring'''
        return torch.zeros(batch_size , self.prefix_length , dtype=torch.int64 , device=device )
    def encode( self , prefix ):
        '''simple docstring'''
        return self.encode_prefix(prefix )
@torch.no_grad()
    def generate_captions( self , features , eos_token_id , device ):
        '''simple docstring'''
        features = torch.split(features , 1 , dim=0 )
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device ) )  # back to the clip feature
            # Only support beam search for now
            output_tokens , seq_lengths = self.generate_beam(
                input_embeds=feature , device=device , eos_token_id=eos_token_id )
            generated_tokens.append(output_tokens[0] )
            generated_seq_lengths.append(seq_lengths[0] )
        generated_tokens = torch.stack(generated_tokens )
        generated_seq_lengths = torch.stack(generated_seq_lengths )
        return generated_tokens, generated_seq_lengths
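    # Note (added): `generate_beam` returns its candidates sorted best-first, so
    # index 0 above keeps the highest-scoring caption for each feature.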
@torch.no_grad()
    def generate_beam( self , input_ids=None , input_embeds=None , device=None , beam_size = 5 , entry_length = 67 , temperature = 1.0 , eos_token_id = None , ):
        '''simple docstring'''
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size , device=device , dtype=torch.int )
        is_stopped = torch.zeros(beam_size , device=device , dtype=torch.bool )
        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids )
        for i in range(entry_length ):
            outputs = self.transformer(inputs_embeds=generated )
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1 ).log()
            if scores is None:
                scores , next_tokens = logits.topk(beam_size , -1 )
                generated = generated.expand(beam_size , *generated.shape[1:] )
                next_tokens , scores = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size , *tokens.shape[1:] )
                    tokens = torch.cat((tokens, next_tokens) , dim=1 )
            else:
                logits[is_stopped] = -float(np.inf )
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average , next_tokens = scores_sum_average.view(-1 ).topk(beam_size , -1 )
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1 )
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens) , dim=1 )
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]
            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
            generated = torch.cat((generated, next_token_embed) , dim=1 )
            is_stopped = is_stopped + next_tokens.eq(stop_token_index ).squeeze()
            if is_stopped.all():
                break
        scores = scores / seq_lengths
        order = scores.argsort(descending=True )
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts , dim=0 )
        seq_lengths = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
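        # Note (added): scores are cumulative log-probabilities normalised by beam
        # length, so beams of different lengths stay comparable; `order` therefore
        # lists beams from best to worst.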
return output_texts, seq_lengths | 552 | 1 |
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler( ArgumentHandler ):
    '''simple docstring'''
    def _parse_labels( self , labels ):
        '''simple docstring'''
        if isinstance(labels , str ):
            labels = [label.strip() for label in labels.split(',' ) if label.strip()]
        return labels
    def __call__( self , sequences , labels , hypothesis_template ):
        '''simple docstring'''
        if len(labels ) == 0 or len(sequences ) == 0:
            raise ValueError('You must include at least one label and at least one sequence.' )
        if hypothesis_template.format(labels[0] ) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    'Make sure the passed template includes formatting syntax such as {{}} where the label should go.'
                ).format(hypothesis_template ) )
        if isinstance(sequences , str ):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label )] for label in labels] )
        return sequence_pairs, sequences
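# Illustrative example (added, values invented): with sequences="I love hiking",
# labels=["sports", "politics"] and the default template "This example is {}.",
# the handler returns
#   [["I love hiking", "This example is sports."],
#    ["I love hiking", "This example is politics."]], ["I love hiking"]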
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotClassificationPipeline( ChunkPipeline ):
    '''simple docstring'''
    def __init__( self , args_parser=ZeroShotClassificationArgumentHandler() , *args , **kwargs ):
        '''simple docstring'''
        self._args_parser = args_parser
        super().__init__(*args , **kwargs )
if self.entailment_id == -1:
logger.warning(
'Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to '
'-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.' )
@property
    def entailment_id( self ):
        '''simple docstring'''
        for label, ind in self.model.config.label2id.items():
if label.lower().startswith('entail' ):
return ind
return -1
    def _parse_and_tokenize( self , sequence_pairs , padding=True , add_special_tokens=True , truncation=TruncationStrategy.ONLY_FIRST , **kwargs ):
        '''simple docstring'''
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                'Tokenizer was not supporting padding necessary for zero-shot, attempting to use '
                ' `pad_token=eos_token`' )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs , add_special_tokens=add_special_tokens , return_tensors=return_tensors , padding=padding , truncation=truncation , )
        except Exception as e:
            if "too short" in str(e ):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs , add_special_tokens=add_special_tokens , return_tensors=return_tensors , padding=padding , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
            else:
                raise e
        return inputs
    def _sanitize_parameters( self , **kwargs ):
        '''simple docstring'''
        if kwargs.get('multi_class' , None ) is not None:
            kwargs['multi_label'] = kwargs['multi_class']
            logger.warning(
                'The `multi_class` argument has been deprecated and renamed to `multi_label`. '
                '`multi_class` will be removed in a future version of Transformers.' )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['candidate_labels'] = self._args_parser._parse_labels(kwargs['candidate_labels'] )
        if "hypothesis_template" in kwargs:
            preprocess_params['hypothesis_template'] = kwargs['hypothesis_template']
        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params['multi_label'] = kwargs['multi_label']
return preprocess_params, {}, postprocess_params
    def __call__( self , sequences , *args , **kwargs , ):
        '''simple docstring'''
        if len(args ) == 0:
            pass
        elif len(args ) == 1 and "candidate_labels" not in kwargs:
            kwargs['candidate_labels'] = args[0]
        else:
            raise ValueError(F'Unable to understand extra arguments {args}' )
        return super().__call__(sequences , **kwargs )
    def preprocess( self , inputs , candidate_labels=None , hypothesis_template="This example is {}." ):
        '''simple docstring'''
        sequence_pairs , sequences = self._args_parser(inputs , candidate_labels , hypothesis_template )
        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels , sequence_pairs ) ):
            model_input = self._parse_and_tokenize([sequence_pair] )
            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels ) - 1,
                **model_input,
            }
    def _forward( self , inputs ):
        '''simple docstring'''
        candidate_label = inputs['candidate_label']
        sequence = inputs['sequence']
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs )
        model_outputs = {
'candidate_label': candidate_label,
'sequence': sequence,
'is_last': inputs['is_last'],
**outputs,
}
return model_outputs
    def postprocess( self , model_outputs , multi_label=False ):
        '''simple docstring'''
        candidate_labels = [outputs['candidate_label'] for outputs in model_outputs]
        sequences = [outputs['sequence'] for outputs in model_outputs]
        logits = np.concatenate([output['logits'].numpy() for output in model_outputs] )
        N = logits.shape[0]
        n = len(candidate_labels )
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1) )
        if multi_label or len(candidate_labels ) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits ) / np.exp(entail_contr_logits ).sum(-1 , keepdims=True )
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits ) / np.exp(entail_logits ).sum(-1 , keepdims=True )
        top_inds = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
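# Illustrative aside (added, not part of the pipeline): the multi-label branch's
# softmax over [contradiction, entailment] logits, shown on toy numbers.
_toy_logits = np.array([[0.0, 2.0]])
_toy_scores = np.exp(_toy_logits) / np.exp(_toy_logits).sum(-1, keepdims=True)
assert 0.87 < _toy_scores[0, 1] < 0.89  # entailment probability is about 0.881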
| 237 | """simple docstring"""
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar('''KT''')
VT = TypeVar('''VT''')
class Node( Generic[KT, VT] ):
'''simple docstring'''
    def __init__( self , key = "root" , value = None ):
        '''simple docstring'''
        self.key = key
        self.value = value
        self.forward : list[Node[KT, VT]] = []
def __repr__( self ):
'''simple docstring'''
return F'Node({self.key}: {self.value})'
@property
    def level( self ):
'''simple docstring'''
return len(self.forward )
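    # Note (added): a node's level is simply how many forward pointers it holds;
    # taller nodes act as "express lanes" that let searches skip many entries.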
class SkipList( Generic[KT, VT] ):
'''simple docstring'''
    def __init__( self , p = 0.5 , max_level = 16 ):
        '''simple docstring'''
        self.head : Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level
def __str__( self ):
'''simple docstring'''
        items = list(self )
        if len(items ) == 0:
            return F'SkipList(level={self.level})'
        label_size = max((len(str(item ) ) for item in items) , default=4 )
        label_size = max(label_size , 4 ) + 4
        node = self.head
        lines = []
        forwards = node.forward.copy()
        lines.append(F'[{node.key}]'.ljust(label_size , '-' ) + '* ' * len(node.forward ) )
        lines.append(' ' * label_size + '| ' * len(node.forward ) )
        while len(node.forward ) != 0:
            node = node.forward[0]
            lines.append(
                F'[{node.key}]'.ljust(label_size , '-' )
                + ' '.join(str(n.key ) if n.key == node.key else '|' for n in forwards ) )
            lines.append(' ' * label_size + '| ' * len(node.forward ) )
            forwards = node.forward
        lines.append('None'.ljust(label_size ) + '* ' * len(node.forward ) )
        return F'SkipList(level={self.level})\n' + "\n".join(lines )
def __iter__( self ):
'''simple docstring'''
        node = self.head
        while len(node.forward ) != 0:
            yield node.forward[0].key
            node = node.forward[0]
    def random_level( self ):
        '''simple docstring'''
        level = 1
while random() < self.p and level < self.max_level:
level += 1
return level
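    # Note (added): levels follow a geometric distribution with parameter p, so the
    # expected number of nodes at level k decays as p**(k - 1); this is what gives
    # the skip list its expected O(log n) search cost.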
    def _locate_node( self , key ):
        '''simple docstring'''
        update_vector = []
        node = self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
            update_vector.append(node )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
    def delete( self , key ):
        '''simple docstring'''
        node , update_vector = self._locate_node(key )
        if node is not None:
            for i, update_node in enumerate(update_vector ):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]
    def insert( self , key , value ):
        '''simple docstring'''
        node , update_vector = self._locate_node(key )
        if node is not None:
            node.value = value
        else:
            level = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1 , level ):
                    update_vector.append(self.head )
                self.level = level
            new_node = Node(key , value )
            for i, update_node in enumerate(update_vector[:level] ):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i] )
                if update_node.level < i + 1:
                    update_node.forward.append(new_node )
                else:
                    update_node.forward[i] = new_node
    def find( self , key ):
        '''simple docstring'''
        node , _ = self._locate_node(key )
        if node is not None:
            return node.value
return None
def test_insert() -> None:
    '''simple docstring'''
    skip_list = SkipList()
    skip_list.insert('Key1' , 3 )
    skip_list.insert('Key2' , 12 )
    skip_list.insert('Key3' , 41 )
    skip_list.insert('Key4' , -19 )
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    assert len(all_values ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def test_insert_overrides_existing_value() -> None:
    '''simple docstring'''
    skip_list = SkipList()
    skip_list.insert('Key1' , 10 )
    skip_list.insert('Key1' , 12 )
    skip_list.insert('Key5' , 7 )
    skip_list.insert('Key7' , 10 )
    skip_list.insert('Key10' , 5 )
    skip_list.insert('Key7' , 7 )
    skip_list.insert('Key5' , 5 )
    skip_list.insert('Key10' , 10 )
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    if len(all_values ) != 4:
        print()
    assert len(all_values ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def test_searching_empty_list_returns_none() -> None:
    '''simple docstring'''
    skip_list = SkipList()
assert skip_list.find('Some key' ) is None
def test_search() -> None:
    '''simple docstring'''
    skip_list = SkipList()
skip_list.insert('Key2' , 20 )
assert skip_list.find('Key2' ) == 20
skip_list.insert('Some Key' , 10 )
skip_list.insert('Key2' , 8 )
skip_list.insert('V' , 13 )
assert skip_list.find('Y' ) is None
assert skip_list.find('Key2' ) == 8
assert skip_list.find('Some Key' ) == 10
assert skip_list.find('V' ) == 13
def test_deleting_item_from_empty_list_do_nothing() -> None:
    '''simple docstring'''
    skip_list = SkipList()
skip_list.delete('Some key' )
assert len(skip_list.head.forward ) == 0
def test_deleted_items_are_not_founded_by_find_method() -> None:
    '''simple docstring'''
    skip_list = SkipList()
skip_list.insert('Key1' , 12 )
skip_list.insert('V' , 13 )
skip_list.insert('X' , 14 )
skip_list.insert('Key2' , 15 )
skip_list.delete('V' )
skip_list.delete('Key2' )
assert skip_list.find('V' ) is None
assert skip_list.find('Key2' ) is None
def test_delete_removes_only_given_key() -> None:
    '''simple docstring'''
    skip_list = SkipList()
skip_list.insert('Key1' , 12 )
skip_list.insert('V' , 13 )
skip_list.insert('X' , 14 )
skip_list.insert('Key2' , 15 )
skip_list.delete('V' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) == 14
assert skip_list.find('Key1' ) == 12
assert skip_list.find('Key2' ) == 15
skip_list.delete('X' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) == 12
assert skip_list.find('Key2' ) == 15
skip_list.delete('Key1' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) is None
assert skip_list.find('Key2' ) == 15
skip_list.delete('Key2' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) is None
assert skip_list.find('Key2' ) is None
def test_delete_doesnt_leave_dead_nodes() -> None:
    '''simple docstring'''
    skip_list = SkipList()
skip_list.insert('Key1' , 12 )
skip_list.insert('V' , 13 )
skip_list.insert('X' , 142 )
skip_list.insert('Key2' , 15 )
skip_list.delete('X' )
    def traverse_keys(node ):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def test_iter_always_yields_sorted_values() -> None:
    '''simple docstring'''
    def is_sorted(lst ):
        return all(next_item >= item for item, next_item in zip(lst , lst[1:] ) )
    skip_list = SkipList()
    for i in range(10 ):
        skip_list.insert(i , i )
    assert is_sorted(list(skip_list ) )
    skip_list.delete(5 )
    skip_list.delete(8 )
    skip_list.delete(2 )
    assert is_sorted(list(skip_list ) )
    skip_list.insert(-12 , -12 )
    skip_list.insert(77 , 77 )
    assert is_sorted(list(skip_list ) )
def pytests() -> None:
'''simple docstring'''
for _ in range(100 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def main():
    '''simple docstring'''
    skip_list = SkipList()
skip_list.insert(2 , '2' )
skip_list.insert(4 , '4' )
skip_list.insert(6 , '4' )
skip_list.insert(4 , '5' )
skip_list.insert(8 , '4' )
skip_list.insert(9 , '4' )
skip_list.delete(4 )
    print(skip_list )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 237 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester :
"""simple docstring"""
def __init__( self : Optional[int] , _lowerCamelCase : List[Any] , _lowerCamelCase : Dict=1_3 , _lowerCamelCase : Dict=7 , _lowerCamelCase : int=True , _lowerCamelCase : List[Any]=True , _lowerCamelCase : Tuple=True , _lowerCamelCase : int=True , _lowerCamelCase : List[str]=9_9 , _lowerCamelCase : Optional[Any]=3_2 , _lowerCamelCase : List[Any]=5 , _lowerCamelCase : List[str]=4 , _lowerCamelCase : Any=3_7 , _lowerCamelCase : Optional[int]="gelu" , _lowerCamelCase : Dict=0.1 , _lowerCamelCase : Dict=0.1 , _lowerCamelCase : str=1_2_8 , _lowerCamelCase : List[Any]=3_2 , _lowerCamelCase : str=1_6 , _lowerCamelCase : Union[str, Any]=2 , _lowerCamelCase : List[Any]=0.02 , _lowerCamelCase : List[Any]=3 , _lowerCamelCase : Optional[int]=4 , _lowerCamelCase : Optional[int]=None , ):
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_token_type_ids
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = type_sequence_label_size
A__ = initializer_range
A__ = num_labels
A__ = num_choices
A__ = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder( self ):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def A__ ( self : str , _lowerCamelCase : Optional[int] , _lowerCamelCase : int , _lowerCamelCase : str , _lowerCamelCase : Any , _lowerCamelCase : Dict , _lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any] ):
A__ = NezhaModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A__ = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase )
A__ = model(_lowerCamelCase , token_type_ids=_lowerCamelCase )
A__ = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A__ ( self : Union[str, Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[Any] , ):
A__ = True
A__ = NezhaModel(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A__ = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , encoder_attention_mask=_lowerCamelCase , )
A__ = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , )
A__ = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A__ ( self : int , _lowerCamelCase : int , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : int , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Dict ):
A__ = NezhaForMaskedLM(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A__ = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self : Optional[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : List[Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : Tuple , _lowerCamelCase : Tuple ):
A__ = NezhaForNextSentencePrediction(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A__ = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def A__ ( self : Optional[int] , _lowerCamelCase : int , _lowerCamelCase : Optional[int] , _lowerCamelCase : Dict , _lowerCamelCase : int , _lowerCamelCase : Tuple , _lowerCamelCase : List[Any] , _lowerCamelCase : Any ):
A__ = NezhaForPreTraining(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A__ = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase , next_sentence_label=_lowerCamelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def A__ ( self : List[str] , _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any] ):
A__ = NezhaForQuestionAnswering(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A__ = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , start_positions=_lowerCamelCase , end_positions=_lowerCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ ( self : List[str] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Any , _lowerCamelCase : Dict , _lowerCamelCase : Dict , _lowerCamelCase : Any , _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[Any] ):
A__ = self.num_labels
A__ = NezhaForSequenceClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A__ = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self : int , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any] ):
A__ = self.num_labels
A__ = NezhaForTokenClassification(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A__ = model(_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self : List[str] , _lowerCamelCase : int , _lowerCamelCase : Dict , _lowerCamelCase : int , _lowerCamelCase : List[Any] , _lowerCamelCase : str , _lowerCamelCase : Any , _lowerCamelCase : Union[str, Any] ):
A__ = self.num_choices
A__ = NezhaForMultipleChoice(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
A__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A__ ( self : Optional[int] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class NezhaModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : List[str] =(
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
_lowerCamelCase : Optional[int] =(
{
"feature-extraction": NezhaModel,
"fill-mask": NezhaForMaskedLM,
"question-answering": NezhaForQuestionAnswering,
"text-classification": NezhaForSequenceClassification,
"token-classification": NezhaForTokenClassification,
"zero-shot": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCamelCase : Optional[int] =True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict['''labels'''] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict['''next_sentence_label'''] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
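    # Note (added): models in `MODEL_FOR_PRETRAINING_MAPPING` expect token-level
    # `labels` plus a per-example `next_sentence_label`, hence the two dummy zero
    # tensors created above when `return_labels=True`.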
    def setUp( self ):
        self.model_tester = NezhaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=NezhaConfig , hidden_size=3_7 )
def A__ ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def A__ ( self : Optional[int] ):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def A__ ( self : Dict ):
A__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_lowerCamelCase )
def A__ ( self : Dict ):
# This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , )
def A__ ( self : str ):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowerCamelCase )
def A__ ( self : Dict ):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_lowerCamelCase )
def A__ ( self : str ):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*_lowerCamelCase )
def A__ ( self : Tuple ):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_lowerCamelCase )
def A__ ( self : Union[str, Any] ):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCamelCase )
def A__ ( self : List[str] ):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowerCamelCase )
def A__ ( self : List[str] ):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowerCamelCase )
@slow
def A__ ( self : Tuple ):
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = NezhaModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
@slow
@require_torch_gpu
def A__ ( self : str ):
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
A__ = True
A__ = model_class(config=_lowerCamelCase )
A__ = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
A__ = torch.jit.trace(
_lowerCamelCase , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_lowerCamelCase , os.path.join(_lowerCamelCase , '''bert.pt''' ) )
A__ = torch.jit.load(os.path.join(_lowerCamelCase , '''bert.pt''' ) , map_location=_lowerCamelCase )
loaded(inputs_dict['''input_ids'''].to(_lowerCamelCase ) , inputs_dict['''attention_mask'''].to(_lowerCamelCase ) )
@require_torch
class NezhaModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self : List[str] ):
A__ = NezhaModel.from_pretrained('''sijunhe/nezha-cn-base''' )
A__ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
A__ = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
A__ = model(_lowerCamelCase , attention_mask=_lowerCamelCase )[0]
A__ = torch.Size((1, 6, 7_6_8) )
self.assertEqual(output.shape , _lowerCamelCase )
A__ = torch.tensor([[[0.0_685, 0.2_441, 0.1_102], [0.0_600, 0.1_906, 0.1_349], [0.0_221, 0.0_819, 0.0_586]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _lowerCamelCase , atol=1E-4 ) )
@slow
def A__ ( self : Optional[int] ):
A__ = NezhaForMaskedLM.from_pretrained('''sijunhe/nezha-cn-base''' )
A__ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
A__ = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
A__ = model(_lowerCamelCase , attention_mask=_lowerCamelCase )[0]
A__ = torch.Size((1, 6, 2_1_1_2_8) )
self.assertEqual(output.shape , _lowerCamelCase )
A__ = torch.tensor(
[[-2.7_939, -1.7_902, -2.2_189], [-2.8_585, -1.8_908, -2.3_723], [-2.6_499, -1.7_750, -2.2_558]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _lowerCamelCase , atol=1E-4 ) )
| 571 |
"""simple docstring"""
def or_gate ( input_1 : int , input_2 : int ) -> int:
    '''Logical OR gate: 1 if at least one input is 1, else 0.'''
    return int((input_1, input_2).count(1 ) != 0 )
def test_or_gate ( ) -> None:
    '''Exhaustively checks the two-input truth table.'''
    assert or_gate(0 , 0 ) == 0
    assert or_gate(0 , 1 ) == 1
    assert or_gate(1 , 0 ) == 1
    assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 573 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 718 |
"""simple docstring"""
import numpy as np
def elu ( vector : np.ndarray , alpha : float ) -> np.ndarray:
    return np.where(vector > 0 , vector , (alpha * (np.exp(vector ) - 1)) )
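# Quick illustrative check (added example): ELU is the identity for positive
# inputs and tends to -alpha for very negative ones.
assert float(elu(np.array([1.0]) , alpha=1.0 )[0]) == 1.0
assert float(elu(np.array([-100.0]) , alpha=1.0 )[0]) > -1.0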
if __name__ == "__main__":
import doctest
doctest.testmod() | 621 | 0 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator , batch_size: int = 1_6 ):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased" )
    datasets = load_dataset("glue" , "mrpc" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label" , "labels" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 1_6
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="longest" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="pt" , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
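# Note (added): evaluation can afford a larger batch size than training because
# no activations need to be kept around for backpropagation.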
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config , args ):
    """simple docstring"""
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS" , None ) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps )
    local_sgd_steps = int(args.local_sgd_steps )
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=gradient_accumulation_steps )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)" )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"] )
    seed = int(config["seed"] )
    batch_size = int(config["batch_size"] )
    metric = evaluate.load("glue" , "mrpc" )
    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=1_0_0 , num_training_steps=(len(train_dataloader ) * num_epochs) , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        with LocalSGD(
            accelerator=accelerator , model=model , local_sgd_steps=local_sgd_steps , enabled=local_sgd_steps is not None ) as local_sgd:
            for step, batch in enumerate(train_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model ):
                    output = model(**batch )
                    loss = output.loss
                    accelerator.backward(loss )
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()
        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions , references = accelerator.gather_for_metrics((predictions, batch["labels"]) )
            metric.add_batch(
                predictions=predictions , references=references , )
            eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'epoch {epoch}:' , eval_metric )
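# Note (added): LocalSGD counts optimizer steps, so with gradient accumulation the
# parameters are synchronised across workers roughly every
# `gradient_accumulation_steps * local_sgd_steps` raw batches.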
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description="Simple example of training script." )
    parser.add_argument(
        "--mixed_precision" , type=str , default=None , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU." , )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps" , type=int , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , )
    parser.add_argument(
        "--local_sgd_steps" , type=int , default=8 , help="Number of local SGD steps or None to disable local SGD" )
    parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
    args = parser.parse_args()
    config = {"lr": 2E-5, "num_epochs": 3, "seed": 4_2, "batch_size": 1_6}
    training_function(config , args )
if __name__ == "__main__":
main()
| 211 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key( name ):
    """simple docstring"""
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks" , "layers" )
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn" , "self_attn" )
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj" , "out_proj" )
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
    if "norm1" in name:
        name = name.replace("norm1" , "layer_norm1" )
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2" , "layer_norm2" )
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm" , "vision_model.layernorm" )
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
    if "ln_1" in name:
        name = name.replace("ln_1" , "layer_norm1" )
    if "ln_2" in name:
        name = name.replace("ln_2" , "layer_norm2" )
    if "c_fc" in name:
        name = name.replace("c_fc" , "fc1" )
    if "c_proj" in name:
        name = name.replace("c_proj" , "fc2" )
    if "text_encoder" in name:
        name = name.replace("text_encoder" , "text_model" )
    if "ln_final" in name:
        name = name.replace("ln_final" , "final_layer_norm" )
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden." , "visual_projection." )
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out." , "visual_projection.3." )
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden" , "text_projection" )
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out" , "text_projection.3" )
return name
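# Illustrative examples of the mapping above (derived by tracing the rules, not
# copied from a checkpoint):
#   "img_encoder.pos_embed" -> "vision_model.embeddings.position_embeddings"
#   "text_encoder.transformer.resblocks.0.ln_1.weight"
#       -> "text_model.encoder.layers.0.layer_norm1.weight"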
def convert_state_dict( orig_state_dict , config ):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split("." )
            stage_num , layer_num = int(key_split[2] ), int(key_split[4] )
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight'] = val[:dim, :]
                orig_state_dict[f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight'] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias'] = val[:dim]
                orig_state_dict[f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias'] = val[dim : dim * 2]
                orig_state_dict[f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias'] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split("." )
            layer_num = int(key_split[3] )
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight'] = val[:dim, :]
                orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight'] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias'] = val[:dim]
                orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias'] = val[dim : dim * 2]
                orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias'] = val[-dim:]
        else:
            new_name = rename_key(key )
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                val = val.squeeze_()
            orig_state_dict[new_name] = val
    return orig_state_dict
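# Illustrative note (added): a fused qkv weight of shape (3 * hidden, hidden) is
# split row-wise into three (hidden, hidden) matrices for q_proj, k_proj, v_proj;
# the corresponding fused bias of length 3 * hidden is split the same way.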
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_groupvit_checkpoint( checkpoint_path , pytorch_dump_folder_path , model_name="groupvit-gcc-yfcc" , push_to_hub=False ):
    """simple docstring"""
    config = GroupViTConfig()
    model = GroupViTModel(config ).eval()
    state_dict = torch.load(checkpoint_path , map_location="cpu" )["model"]
    new_state_dict = convert_state_dict(state_dict , config )
    missing_keys , unexpected_keys = model.load_state_dict(new_state_dict , strict=False )
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys ) == 0)
    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"] , images=image , padding=True , return_tensors="pt" )
    with torch.no_grad():
        outputs = model(**inputs )
    if model_name == "groupvit-gcc-yfcc":
        expected_res = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
    elif model_name == "groupvit-gcc-redcaps":
        expected_res = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
    else:
        raise ValueError(f'Model name {model_name} not supported.' )
    assert torch.allclose(outputs.logits_per_image , expected_res , atol=1E-3 )
    processor.save_pretrained(pytorch_dump_folder_path )
    model.save_pretrained(pytorch_dump_folder_path )
    print("Successfully saved processor and model to" , pytorch_dump_folder_path )
    if push_to_hub:
        print("Pushing to the hub..." )
        processor.push_to_hub(model_name , organization="nielsr" )
        model.push_to_hub(model_name , organization="nielsr" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''')
parser.add_argument(
'''--model_name''',
        default='''groupvit-gcc-yfcc''',
type=str,
help='''Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''',
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 211 | 1 |
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card( model_card_dir , src_lang , tgt_lang , model_name ):
    texts = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, nicht wahr?',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'wmt16-en-de-dist-12-1': [28.3, 27.52],
'wmt16-en-de-dist-6-1': [27.4, 27.11],
'wmt16-en-de-12-1': [26.9, 25.75],
}
    pair = F'{src_lang}-{tgt_lang}'
_lowerCAmelCase = F'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n'
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
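# A minimal sketch of exercising write_model_card() outside the repo tree: write
# the generated cards into a throwaway directory instead of model_cards/. Purely
# illustrative; the directory layout mirrors the loop above.
def write_cards_to_tmp():
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        for name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
            card_dir = Path(tmp) / "allenai" / name
            write_model_card(card_dir, src_lang="en", tgt_lang="de", model_name=name)
            # the card body starts with the YAML front-matter block
            assert (card_dir / "README.md").read_text(encoding="utf-8").startswith("\n---")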
| 225 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''],
'''tokenization_roberta''': ['''RobertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
'''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaForCausalLM''',
'''RobertaForMaskedLM''',
'''RobertaForMultipleChoice''',
'''RobertaForQuestionAnswering''',
'''RobertaForSequenceClassification''',
'''RobertaForTokenClassification''',
'''RobertaModel''',
'''RobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
'''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaForCausalLM''',
'''TFRobertaForMaskedLM''',
'''TFRobertaForMultipleChoice''',
'''TFRobertaForQuestionAnswering''',
'''TFRobertaForSequenceClassification''',
'''TFRobertaForTokenClassification''',
'''TFRobertaMainLayer''',
'''TFRobertaModel''',
'''TFRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
'''FlaxRobertaForCausalLM''',
'''FlaxRobertaForMaskedLM''',
'''FlaxRobertaForMultipleChoice''',
'''FlaxRobertaForQuestionAnswering''',
'''FlaxRobertaForSequenceClassification''',
'''FlaxRobertaForTokenClassification''',
'''FlaxRobertaModel''',
'''FlaxRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
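# The _LazyModule pattern above defers heavy imports until an attribute is first
# touched. A stand-alone sketch of the same idea using PEP 562 module-level
# __getattr__; the attribute-to-module mapping below is illustrative only, not
# the transformers implementation:
import importlib

_LAZY_ATTRS = {"sqrt": "math", "dumps": "json"}  # attribute -> providing module

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name])
        value = getattr(module, name)
        globals()[name] = value  # cache so the import only happens once
        return value
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")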
| 225 | 1 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """Reads a pyspark.sql.DataFrame into a `datasets.Dataset` via the Spark builder."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs)

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(download_mode=download_mode, file_format=self._file_format)
        return self.builder.as_dataset(split=self.split)
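# A hedged usage sketch for the reader above, turning a tiny Spark DataFrame
# into a Dataset. It needs a live SparkSession, and the column names here are
# illustrative, so it is guarded behind __main__.
if __name__ == "__main__":
    from pyspark.sql import SparkSession  # requires a pyspark installation

    spark = SparkSession.builder.master("local[2]").appName("reader-demo").getOrCreate()
    df = spark.createDataFrame([("a", 1), ("b", 2)], ["text", "label"])
    # streaming=False materializes the DataFrame through the Spark builder
    ds = SparkDatasetReader(df, streaming=False).read()
    print(ds)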
| 254 |
from ..utils import DummyObject, requires_backends
class LMSDiscreteScheduler(metaclass=DummyObject):
    # class name reconstructed; the original name was obfuscated, but the
    # ["torch", "scipy"] backends match diffusers' dummy torch+scipy objects
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
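# Sketch of how a DummyObject-style metaclass behaves: class-level attribute
# access raises an import error pointing at the missing backends. This is a
# minimal re-implementation for illustration only, not the library's code:
class _DummyMeta(type):
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires {cls._backends}, which are not installed.")

class NeedsTorchScipy(metaclass=_DummyMeta):
    _backends = ["torch", "scipy"]

try:
    NeedsTorchScipy.from_pretrained("x")
except ImportError as e:
    print(e)  # NeedsTorchScipy requires ['torch', 'scipy'], which are not installed.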
| 254 | 1 |
from __future__ import annotations
graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)
        # recurse on the parent so the path is built from the source outwards
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
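# The recursive shortest_path above rebuilds the path by walking parent links.
# An equivalent iterative sketch (illustrative, not part of the original file)
# that returns the vertices as a list instead of an "->"-joined string:
def path_to(parents: dict, source: str, target: str) -> list[str]:
    path = [target]
    while path[-1] != source:
        step = parents.get(path[-1])
        if step is None:
            raise ValueError(f"No path from vertex: {source} to vertex: {target}")
        path.append(step)
    return path[::-1]  # reverse: we collected vertices from target back to source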
if __name__ == "__main__":
lowerCamelCase_ = Graph(graph, '''G''')
g.breath_first_search()
print(g.shortest_path('''D'''))
print(g.shortest_path('''G'''))
print(g.shortest_path('''Foo''')) | 701 |
def solution(pence: int = 200) -> int:
    """Count the ways to make `pence` from standard UK coins (Project Euler 31)."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
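# Worked example of the DP table for 5 pence (only coins 1, 2, 5 matter): after
# the 1p pass the table is [1, 1, 1, 1, 1, 1]; after the 2p pass
# [1, 1, 2, 2, 3, 3]; after the 5p pass [1, 1, 2, 2, 3, 4]. Iterating coins in
# the outer loop counts combinations (order-insensitive), not permutations.
assert solution(5) == 4  # 5; 2+2+1; 2+1+1+1; 1+1+1+1+1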
if __name__ == "__main__":
assert solution(200) == 73682 | 161 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clap''': [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapAudioConfig''',
'''ClapConfig''',
'''ClapTextConfig''',
],
'''processing_clap''': ['''ClapProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapModel''',
'''ClapPreTrainedModel''',
'''ClapTextModel''',
'''ClapTextModelWithProjection''',
'''ClapAudioModel''',
'''ClapAudioModelWithProjection''',
]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 72 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'naver-clova-ix/donut-base': 'https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json',
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
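# A quick sanity-check sketch of the derived attributes using the defaults
# above: four stages double the channel dimension three times, so the
# VisionEncoderDecoder-facing hidden_size is 96 * 2**3 = 768.
if __name__ == "__main__":
    config = DonutSwinConfig()
    assert config.num_layers == 4
    assert config.hidden_size == 768
    print(config.model_type)  # donut-swin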
| 625 | 0 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Ramps the learning rate up for `warmup_steps`, then hands off to `decay_schedule_fn`."""

    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer(init_lr: float, num_train_steps: int, num_warmup_steps: int, min_lr_ratio: float = 0.0, adam_beta1: float = 0.9, adam_beta2: float = 0.999, adam_epsilon: float = 1e-8, adam_clipnorm: Optional[float] = None, adam_global_clipnorm: Optional[float] = None, weight_decay_rate: float = 0.0, power: float = 1.0, include_in_weight_decay: Optional[List[str]] = None, ):
    """Creates an optimizer with a warmup-then-polynomial-decay learning rate schedule."""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr, decay_steps=num_train_steps - num_warmup_steps, end_learning_rate=init_lr * min_lr_ratio, power=power, )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr, decay_schedule_fn=lr_schedule, warmup_steps=num_warmup_steps, )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule, weight_decay_rate=weight_decay_rate, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"], include_in_weight_decay=include_in_weight_decay, )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay applied to variables matched by the include/exclude patterns."""

    def __init__(self, learning_rate: Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001, beta_1: float = 0.9, beta_2: float = 0.999, epsilon: float = 1e-7, amsgrad: bool = False, weight_decay_rate: float = 0.0, include_in_weight_decay: Optional[List[str]] = None, exclude_from_weight_decay: Optional[List[str]] = None, name: str = "AdamWeightDecay", **kwargs, ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with WarmUp custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate" )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"], use_locking=self._use_locking, )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator(object):
    """Accumulates gradients over multiple mini-batches before an optimizer step."""

    def __init__(self):
        """Initializes the accumulator."""
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA, )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA, )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ] )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
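# A short usage sketch for the schedule above: 100 warmup steps ramp linearly
# to 1e-3 (power=1.0), then the polynomial decay takes over. Printed values are
# illustrative; exact floats can vary slightly with the TF version.
if __name__ == "__main__":
    optimizer, lr_schedule = create_optimizer(
        init_lr=1e-3, num_train_steps=1000, num_warmup_steps=100, weight_decay_rate=0.01
    )
    print(float(lr_schedule(50)))   # ~5e-4, halfway through warmup
    print(float(lr_schedule(100)))  # ~1e-3, warmup done; decay begins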
| 591 |
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        controlnet1.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        controlnet2.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        controlnet = MultiControlNetModel([controlnet1, controlnet2])
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((512, 512))

        output = pipe(
            prompt, image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=50, strength=0.6, )
        image = output.images[0]
        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy" )
        assert np.abs(expected_image - image).max() < 9e-2
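# A sketch of the control_guidance_start/end semantics exercised in
# test_control_guidance_switch: a controlnet contributes only while the
# normalized denoising progress sits inside its [start, end) window. The helper
# below is illustrative of, not copied from, the diffusers implementation.
def controlnet_keep(num_steps: int, starts: list, ends: list) -> list:
    keeps = []
    for i in range(num_steps):
        progress = i / num_steps
        keeps.append([1.0 if s <= progress < e else 0.0 for s, e in zip(starts, ends)])
    return keeps

print(controlnet_keep(4, [0.1, 0.3], [0.2, 0.7]))
# progress hits 0.0, 0.25, 0.5, 0.75 -> the first net is never active here,
# the second only at progress 0.5: [[0,0],[0,0],[0,1],[0,0]]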
| 591 | 1 |
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16):
    """
    Creates the train/validation/test `DataLoader`s for the current cross-validation fold,
    selecting the fold's rows from the raw `glue` dataset by index.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        } )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator, datasets, train_idxs, valid_idxs, )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions, references=references, )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.", )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
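# A small self-contained sketch of the fold-averaging step above: summing the
# per-fold logits and taking argmax is equivalent to averaging them, since
# dividing by num_folds does not change the argmax. Dummy tensors for illustration:
if __name__ == "__main__":
    fold_logits = [torch.tensor([[2.0, 1.0], [0.2, 0.9]]), torch.tensor([[1.5, 1.8], [0.1, 1.2]])]
    ensembled = torch.stack(fold_logits, dim=0).sum(dim=0).div(len(fold_logits)).argmax(dim=-1)
    print(ensembled)  # tensor([0, 1]): example 1 -> class 0, example 2 -> class 1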
| 119 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 90 | 0 |
import warnings
warnings.warn(
    'memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '
'`from accelerate import find_executable_batch_size` to avoid this warning.',
FutureWarning,
)
| 719 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at" )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
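# A sketch of what a local binary pattern value computes for one pixel: compare
# the 8 neighbours to the centre and read the resulting bits as a binary number.
# The helper name and neighbour ordering below are illustrative, not the module's API.
def lbp_value(patch: np.ndarray) -> int:
    """`patch` is a 3x3 window; returns the LBP code of its centre pixel."""
    center = patch[1, 1]
    # clockwise neighbour order starting at top-left
    neighbors = [patch[0, 0], patch[0, 1], patch[0, 2], patch[1, 2],
                 patch[2, 2], patch[2, 1], patch[2, 0], patch[1, 0]]
    bits = [1 if n >= center else 0 for n in neighbors]
    return sum(bit << i for i, bit in enumerate(bits))


assert lbp_value(np.array([[5, 9, 1], [3, 4, 7], [2, 4, 6]])) == 59  # bits 1,1,0,1,1,1,0,0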
| 515 | 0 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
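# A sketch of the alignment rule the first test exercises: when both arguments
# are None the last stage is selected; when only one is given, the other is
# derived from it. Simplified re-implementation for illustration only:
def align(out_features, out_indices, stage_names):
    if out_features is None and out_indices is None:
        return [stage_names[-1]], [len(stage_names) - 1]
    if out_features is None:
        return [stage_names[i] for i in out_indices], out_indices
    if out_indices is None:
        return out_features, [stage_names.index(f) for f in out_features]
    return out_features, out_indices


assert align(None, None, ["a", "b", "c"]) == (["a", "b", "c"][-1:], [2])  # (['c'], [2])
assert align(None, [-3, -1], ["a", "b", "c"]) == (["a", "c"], [-3, -1])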
| 53 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None, metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        }, )
    model_type: Optional[str] = field(
        default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, )
    config_overrides: Optional[str] = field(
        default=None, metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        }, )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, )
    use_auth_token: bool = field(
        default=False, metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        }, )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path" )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."} )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, )
    train_ref_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input train ref data file for whole word masking in Chinese."}, )
    validation_ref_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."}, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} )
    validation_split_percentage: Optional[int] = field(
        default=5, metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        }, )
    max_seq_length: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        }, )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}, )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
    pad_to_max_length: bool = field(
        default=False, metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        }, )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
def a_ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
__lowerCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__lowerCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout )], )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s', lowerCAmelCase_ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets['validation'] = load_dataset(
                data_args.dataset_name, data_args.dataset_config_name, split=F"""train[:{data_args.validation_split_percentage}%]""", )
            datasets['train'] = load_dataset(
                data_args.dataset_name, data_args.dataset_config_name, split=F"""train[{data_args.validation_split_percentage}%:]""", )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files['train'] = data_args.train_file
        if data_args.validation_file is not None:
            data_files['validation'] = data_args.validation_file
        extension = data_args.train_file.split('.')[-1]
        if extension == "txt":
            extension = 'text'
        datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        'cache_dir': model_args.cache_dir,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
    tokenizer_kwargs = {
        'cache_dir': model_args.cache_dir,
        'use_fast': model_args.use_fast_tokenizer,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.' )
if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    else:
        logger.info('Training new model from scratch')
        model = AutoModelForMaskedLM.from_config(config)
    model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
        column_names = datasets['train'].column_names
    else:
        column_names = datasets['validation'].column_names
    text_column_name = 'text' if 'text' in column_names else column_names[0]
    padding = 'max_length' if data_args.pad_to_max_length else False
    def tokenize_function(examples):
        # Remove empty lines
        examples['text'] = [line for line in examples['text'] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples['text'], padding=padding, truncation=True, max_length=data_args.max_seq_length)
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=[text_column_name], load_from_cache_file=not data_args.overwrite_cache, )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
        tokenized_datasets['train'] = add_chinese_references(tokenized_datasets['train'], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets['validation'] = add_chinese_references(
            tokenized_datasets['validation'], data_args.validation_ref_file)
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False
# Data collator
# This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)
# Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=tokenized_datasets['train'] if training_args.do_train else None, eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, )
# Training
if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        output_train_file = os.path.join(training_args.output_dir, 'train_results.txt')
        if trainer.is_world_process_zero():
            with open(output_train_file, 'w') as writer:
logger.info('***** Train results *****' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir, 'trainer_state.json' ) )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output['eval_loss'])
        results['perplexity'] = perplexity
        output_eval_file = os.path.join(training_args.output_dir, 'eval_results_mlm_wwm.txt')
        if trainer.is_world_process_zero():
            with open(output_eval_file, 'w') as writer:
logger.info('***** Eval results *****' )
for key, value in sorted(results.items() ):
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
return results
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
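# Usage sketch (not part of the original script; the script name and every value
# below are illustrative placeholders, not taken from the source):
#
#   python run_mlm_wwm.py \
#       --model_name_or_path bert-base-chinese \
#       --train_file train.txt \
#       --train_ref_file train_ref.json \
#       --do_train --do_eval \
#       --output_dir ./mlm-wwm-out
#
# --train_ref_file feeds add_chinese_references() above, which attaches a
# 'chinese_ref' column so DataCollatorForWholeWordMask can mask whole words.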
| 53 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
# NOTE: the original class name was elided in this dump; "ImageProcessor" is a placeholder.
class ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PIL.Image.BICUBIC, do_center_crop=True, crop_size=None, rescale_factor=1 / 255, do_rescale=True, do_normalize=True, image_mean=None, image_std=None, **kwargs, ):
        super().__init__(**kwargs)
        size = size if size is not None else {'height': 256, 'width': 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PIL.Image.BICUBIC, data_format=None, **kwargs, ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""")
        return resize(
            image, size=(size['height'], size['width']), resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs, ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""")
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs, ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs, ):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs, ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
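# Usage sketch (illustrative; assumes Pillow is installed and "example.jpg" exists;
# "ImageProcessor" is the placeholder name used in the restored class above):
#   import PIL.Image
#   processor = ImageProcessor()                              # 256x256 resize, 224x224 center crop
#   batch = processor.preprocess(PIL.Image.open("example.jpg"), return_tensors="np")
#   batch["pixel_values"].shape                               # -> (1, 3, 224, 224)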
| 721 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()
DEVICE_MAPPING = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                F"""Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` """
                'is not serializable neither with `pickle` nor with `dill`. Instead you can surround '
                'the device with `str()` to get its string identifier that will be internally mapped '
                'to the actual `jaxlib.xla_extension.Device`.')
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                F"""Device with string identifier {self.device} not listed among the available """
                F"""devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default """
                F"""device: {str(jax.devices()[0])}.""")
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str():
        import jax
        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp
        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp
        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()
        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {'dtype': jnp.int64}
            else:
                default_dtype = {'dtype': jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {'dtype': jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax
        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, '__array__') and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self._recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self._recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table):
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table):
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table):
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
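# Sketch of the dtype rule in _tensorize above (assumes jax and numpy are installed):
# with jax's x64 mode off (the default), integer numpy inputs map to jnp.int32,
# otherwise to jnp.int64; floating inputs always map to jnp.float32 here.
#   import numpy as np
#   import jax.numpy as jnp
#   jnp.array(np.arange(3), dtype=jnp.int32).dtype   # dtype('int32')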
| 16 | 0 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))
    def get_scheduler_config(self, **kwargs):
        config = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'clip_sample': True,
        }
        config.update(**kwargs)
        return config
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample
        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)
        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)
        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))
        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type='v_prediction')
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
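# Reading aid (not part of the test file): batch_step_no_noise, as exercised in
# test_batch_step_no_noise above, consumes several (timestep, sample) pairs at
# once; the flatten(0, 1) calls collapse the (num_timesteps, batch, ...) stack
# into one large batch for a single model evaluation.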
| 10 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel
api = HfApi()
results = {}
# fmt: off
_lowerCAmelCase = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
_lowerCAmelCase = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
_lowerCAmelCase = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
_lowerCAmelCase = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
_lowerCAmelCase = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
_lowerCAmelCase = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
_lowerCAmelCase = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
_lowerCAmelCase = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
_lowerCAmelCase = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
_lowerCAmelCase = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
_lowerCAmelCase = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
_lowerCAmelCase = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
_lowerCAmelCase = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
_lowerCAmelCase = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
_lowerCAmelCase = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
_lowerCAmelCase = api.list_models(filter="diffusers")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
_lowerCAmelCase = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
print(f'Started running {mod.modelId}!!!')
if mod.modelId.startswith("CompVis"):
_lowerCAmelCase = UNetaDModel.from_pretrained(local_checkpoint, subfolder="unet")
else:
_lowerCAmelCase = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
_lowerCAmelCase = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
_lowerCAmelCase = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
_lowerCAmelCase = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1E-3
)
print(f'{mod.modelId} has passed successfully!!!')
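# Reading aid (not in the source): the lookup key above rewrites a model id such
# as "google/ddpm-cifar10-32" into "google_ddpm_cifar10_32" -- the "/" is joined
# with "_" first, then the remaining "-" separators are joined the same way --
# so it matches the keys of the `results` dict populated at the top of this file
# (the concrete key names were elided in this dump and are left as-is).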
| 10 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
"""studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""",
}
class LukeConfig(PretrainedConfig):
    model_type = 'luke'

    def __init__(self, vocab_size=50267, entity_vocab_size=500000, hidden_size=768, entity_emb_size=256, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_entity_aware_attention=True, classifier_dropout=None, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
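# Usage sketch (illustrative): LUKE keeps a smaller entity embedding size next to
# the token hidden size and projects entity embeddings up inside the model.
#   config = LukeConfig()
#   (config.hidden_size, config.entity_emb_size)   # (768, 256)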
| 717 |
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    _optional_components = ['vqvae']

    def __init__(self, vqvae: AutoencoderKL, unet: UNet2DConditionModel, mel: Mel, scheduler: Union[DDIMScheduler, DDPMScheduler], ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self):
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
@torch.no_grad()
def __call__( self : Optional[int] , a : int = 1 , a : str = None , a : np.ndarray = None , a : int = 0 , a : int = 0 , a : int = None , a : torch.Generator = None , a : float = 0 , a : float = 0 , a : torch.Generator = None , a : float = 0 , a : torch.Tensor = None , a : torch.Tensor = None , a : Optional[int]=True , ):
"""simple docstring"""
__snake_case : List[Any] =steps or self.get_default_steps()
self.scheduler.set_timesteps(a )
__snake_case : int =step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
__snake_case : Any =(self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
__snake_case : List[str] =randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=a , device=self.device , )
__snake_case : int =noise
__snake_case : List[str] =None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(a , a )
__snake_case : List[Any] =self.mel.audio_slice_to_image(a )
__snake_case : Tuple =np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape(
(input_image.height, input_image.width) )
__snake_case : str =(input_image / 2_5_5) * 2 - 1
__snake_case : Optional[int] =torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
__snake_case : Tuple =self.vqvae.encode(torch.unsqueeze(a , 0 ) ).latent_dist.sample(
generator=a )[0]
__snake_case : Optional[int] =self.vqvae.config.scaling_factor * input_images
if start_step > 0:
__snake_case : str =self.scheduler.add_noise(a , a , self.scheduler.timesteps[start_step - 1] )
__snake_case : Optional[Any] =(
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
__snake_case : int =int(mask_start_secs * pixels_per_second )
__snake_case : Any =int(mask_end_secs * pixels_per_second )
__snake_case : Union[str, Any] =self.scheduler.add_noise(a , a , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , a ):
__snake_case : List[str] =self.unet(a , a , a )['''sample''']
else:
__snake_case : Union[str, Any] =self.unet(a , a )['''sample''']
if isinstance(self.scheduler , a ):
__snake_case : List[str] =self.scheduler.step(
model_output=a , timestep=a , sample=a , eta=a , generator=a , )['''prev_sample''']
else:
__snake_case : List[Any] =self.scheduler.step(
model_output=a , timestep=a , sample=a , generator=a , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
__snake_case : Any =mask[:, step, :, :mask_start]
if mask_end > 0:
__snake_case : Optional[Any] =mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
__snake_case : str =1 / self.vqvae.config.scaling_factor * images
__snake_case : Optional[int] =self.vqvae.decode(a )['''sample''']
__snake_case : int =(images / 2 + 0.5).clamp(0 , 1 )
__snake_case : Optional[Any] =images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
__snake_case : Optional[int] =(images * 2_5_5).round().astype('''uint8''' )
__snake_case : Tuple =list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(a , mode='''RGB''' ).convert('''L''' ) for _ in images) )
__snake_case : Union[str, Any] =[self.mel.image_to_audio(a ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(a )[:, np.newaxis, :] ) , **ImagePipelineOutput(a ) )
@torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50):
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype='uint8').reshape((1, image.height, image.width)) for image in images])
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)
        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)['sample']
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
        return sample
@staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float):
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
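# Worked example for slerp above (a sketch; plain torch, no pipeline instance needed):
#   import torch
#   x0 = torch.tensor([1.0, 0.0])
#   x1 = torch.tensor([0.0, 1.0])
#   AudioDiffusionPipeline.slerp(x0, x1, 0.5)   # ~[0.7071, 0.7071], halfway along the arc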
| 497 | 0 |
import math
def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
print(F'{solution() = }')
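# Why the candidate is (integer**2 - 1) / 4 (explanatory note, not source text):
# check_partition_perfect(k) is True exactly when sqrt(4*k + 1) = 2**(m + 1) - 1
# for some integer m, i.e. when log2(sqrt(4*k + 1) / 2 + 1 / 2) is integral.
# Only odd values of `integer` make k = (integer**2 - 1) / 4 an integer, which is
# what the int() comparison inside the loop filters for.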
| 462 |
import unittest
from knapsack import greedy_knapsack as kp
class TestGreedyKnapsack(unittest.TestCase):
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, 'max_weight must greater than zero.')

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, 'Weight can not be negative.')

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, 'Profit can not be negative.')

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, 'max_weight must greater than zero.')

    def test_unequal_list_length(self):
        self.assertRaisesRegex(
            ValueError, 'The length of profit and weight must be same.')
if __name__ == "__main__":
unittest.main()
| 462 | 1 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class UMT5ModelTester:
    def __init__(self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=9, is_training=True, use_attention_mask=True, use_labels=False, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
        return T5Config.from_pretrained('google/umt5-base')
    def prepare_inputs_dict(self, config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)
        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config(self):
        return T5Config(
            vocab_size=166, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )
    def get_config(self):
        return T5Config(
            vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )
    def create_and_check_model(self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)
    def create_and_check_decoder_model_past(self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_model_fp16_forward(self, config, input_dict, ):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
    def setUp(self):
        self.model_tester = UMT5ModelTester(self)
@unittest.skip("""Test has a segmentation fault on torch 1.8.0""" )
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), F"""{tmpdirname}/t5_test.onnx""", export_params=True, opset_version=9, input_names=["input_ids", "decoder_input_ids"], )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)
    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)
        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }
        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device)
            out = model.generate(
                config_and_inputs[1]["input_ids"], num_beams=1, max_length=3, output_attentions=True, return_dict_in_generate=True, **head_masks, )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
@unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" )
    def test_disk_offload(self):
"""simple docstring"""
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
@slow
@unittest.skip(
"""Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" )
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
"""Bonjour monsieur <extra_id_0> bien <extra_id_1>.""",
"""No se como puedo <extra_id_0>.""",
"""This is the reason why we <extra_id_0> them.""",
"""The <extra_id_0> walks in <extra_id_1>, seats""",
"""A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""",
]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
[
[ 3_8_5_3_0, 2_1_0_7_0_3, 2_5_6_2_9_9, 1_4_1_0, 2_5_6_2_9_8, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_2_6, 3_2_1, 6_7_1, 2_5_9_2_2, 2_5_6_2_9_9, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1_4_6_0, 3_3_9, 3_1_2, 1_9_0_1_4, 1_0_6_2_0, 7_5_8, 2_5_6_2_9_9, 2_3_5_5,2_7_4, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_1_7, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 3_0_1, 2_5_6_2_9_8, 2_7_5, 1_1_9_9_8_3,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_2_0, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 2_2_3_4, 2_8_9, 2_2_7_5, 3_3_3,6_1_3_9_1, 2_8_9, 2_5_6_2_9_8, 5_4_3, 2_5_6_2_9_7, 1_6_8_7_1_4, 3_2_9, 2_5_6_2_9_6,2_7_4, 1],
] )
# fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)
        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
"""<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
"""<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
| 712 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
__a = {"""tokenization_herbert""": ["""HerbertTokenizer"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["""HerbertTokenizerFast"""]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
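# Reading aid (not in the source): the _LazyModule pattern above defers the real
# imports until an attribute is first accessed, so importing the package stays
# cheap, and the fast tokenizer is only registered when `tokenizers` is installed.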
| 627 | 0 |
"""simple docstring"""
def price_plus_tax(price: float, tax_rate: float) -> float:
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(100, 0.25) = }''')
    print(f'''{price_plus_tax(125.50, 0.05) = }''')
| 46 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes: int):
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int):
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int):
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int):
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int):
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self):
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f'''Added edge [{u} - {v}]\nAdded weight: {w}\n''')
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f'''The total weight of the minimal spanning tree is: {mst_weight}''')
def test_vector() -> None:
    '''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
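# Usage sketch for Graph above (values illustrative):
#   g = Graph(3)
#   g.add_edge(0, 1, 5); g.add_edge(1, 2, 1); g.add_edge(0, 2, 3)
#   g.boruvka()   # selects edges (0, 2) and (1, 2); total MST weight 4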
| 674 | 0 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase__ ( __A ):
def __init__( self , _lowercase , _lowercase=13 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=False , _lowercase=False , _lowercase=False , _lowercase=2 , _lowercase=99 , _lowercase=0 , _lowercase=32 , _lowercase=5 , _lowercase=4 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=12 , _lowercase=2 , _lowercase=0.02 , _lowercase=3 , _lowercase=4 , _lowercase="last" , _lowercase=None , _lowercase=None , ):
lowerCAmelCase_ : Optional[Any] = parent
lowerCAmelCase_ : List[Any] = batch_size
lowerCAmelCase_ : str = seq_length
lowerCAmelCase_ : Optional[int] = is_training
lowerCAmelCase_ : List[str] = use_input_lengths
lowerCAmelCase_ : Any = use_token_type_ids
lowerCAmelCase_ : Optional[Any] = use_labels
lowerCAmelCase_ : List[Any] = gelu_activation
lowerCAmelCase_ : int = sinusoidal_embeddings
lowerCAmelCase_ : Any = causal
lowerCAmelCase_ : Optional[int] = asm
lowerCAmelCase_ : str = n_langs
lowerCAmelCase_ : Any = vocab_size
lowerCAmelCase_ : Optional[Any] = n_special
lowerCAmelCase_ : Optional[Any] = hidden_size
lowerCAmelCase_ : List[Any] = num_hidden_layers
lowerCAmelCase_ : List[Any] = num_attention_heads
lowerCAmelCase_ : Optional[int] = hidden_dropout_prob
lowerCAmelCase_ : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase_ : Optional[int] = max_position_embeddings
lowerCAmelCase_ : Optional[Any] = type_vocab_size
lowerCAmelCase_ : Optional[int] = type_sequence_label_size
lowerCAmelCase_ : Union[str, Any] = initializer_range
lowerCAmelCase_ : Dict = num_labels
lowerCAmelCase_ : Any = num_choices
lowerCAmelCase_ : str = summary_type
lowerCAmelCase_ : List[Any] = use_proj
lowerCAmelCase_ : Optional[Any] = scope
def UpperCAmelCase__ ( self ):
lowerCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase_ : List[str] = None
if self.use_input_lengths:
lowerCAmelCase_ : Optional[int] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowerCAmelCase_ : Dict = None
if self.use_token_type_ids:
lowerCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowerCAmelCase_ : int = None
lowerCAmelCase_ : Union[str, Any] = None
lowerCAmelCase_ : Dict = None
if self.use_labels:
lowerCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase_ : Dict = ids_tensor([self.batch_size] , 2 ).float()
lowerCAmelCase_ : int = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase_ : List[Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCAmelCase__ ( self ):
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ):
lowerCAmelCase_ : List[Any] = FlaubertModel(config=_lowercase )
model.to(_lowercase )
model.eval()
lowerCAmelCase_ : Dict = model(_lowercase , lengths=_lowercase , langs=_lowercase )
lowerCAmelCase_ : Optional[int] = model(_lowercase , langs=_lowercase )
lowerCAmelCase_ : Any = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ):
lowerCAmelCase_ : List[Any] = FlaubertWithLMHeadModel(_lowercase )
model.to(_lowercase )
model.eval()
lowerCAmelCase_ : Optional[Any] = model(_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ):
lowerCAmelCase_ : int = FlaubertForQuestionAnsweringSimple(_lowercase )
model.to(_lowercase )
model.eval()
lowerCAmelCase_ : Optional[int] = model(_lowercase )
lowerCAmelCase_ : int = model(_lowercase , start_positions=_lowercase , end_positions=_lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ):
lowerCAmelCase_ : int = FlaubertForQuestionAnswering(_lowercase )
model.to(_lowercase )
model.eval()
lowerCAmelCase_ : Optional[Any] = model(_lowercase )
lowerCAmelCase_ : str = model(
_lowercase , start_positions=_lowercase , end_positions=_lowercase , cls_index=_lowercase , is_impossible=_lowercase , p_mask=_lowercase , )
lowerCAmelCase_ : Optional[Any] = model(
_lowercase , start_positions=_lowercase , end_positions=_lowercase , cls_index=_lowercase , is_impossible=_lowercase , )
((lowerCAmelCase_) , ) : int = result_with_labels.to_tuple()
lowerCAmelCase_ : Tuple = model(_lowercase , start_positions=_lowercase , end_positions=_lowercase )
((lowerCAmelCase_) , ) : List[Any] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ):
lowerCAmelCase_ : List[str] = FlaubertForSequenceClassification(_lowercase )
model.to(_lowercase )
model.eval()
lowerCAmelCase_ : int = model(_lowercase )
lowerCAmelCase_ : Dict = model(_lowercase , labels=_lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ):
lowerCAmelCase_ : Dict = self.num_labels
lowerCAmelCase_ : Tuple = FlaubertForTokenClassification(_lowercase )
model.to(_lowercase )
model.eval()
lowerCAmelCase_ : List[Any] = model(_lowercase , attention_mask=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ):
lowerCAmelCase_ : str = self.num_choices
lowerCAmelCase_ : Optional[int] = FlaubertForMultipleChoice(config=_lowercase )
model.to(_lowercase )
model.eval()
lowerCAmelCase_ : Tuple = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ : Optional[int] = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase__ ( self ):
lowerCAmelCase_ : Tuple = self.prepare_config_and_inputs()
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = config_and_inputs
lowerCAmelCase_ : Union[str, Any] = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""lengths""": input_lengths,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class lowercase__ ( __A , __A , unittest.TestCase ):
__UpperCamelCase = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
__UpperCamelCase = (
{
"""feature-extraction""": FlaubertModel,
"""fill-mask""": FlaubertWithLMHeadModel,
"""question-answering""": FlaubertForQuestionAnsweringSimple,
"""text-classification""": FlaubertForSequenceClassification,
"""token-classification""": FlaubertForTokenClassification,
"""zero-shot""": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase=False ):
lowerCAmelCase_ : List[str] = super()._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
lowerCAmelCase_ : Tuple = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowercase )
lowerCAmelCase_ : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowercase )
return inputs_dict
def UpperCAmelCase__ ( self ):
lowerCAmelCase_ : Union[str, Any] = FlaubertModelTester(self )
lowerCAmelCase_ : int = ConfigTester(self , config_class=_lowercase , emb_dim=37 )
def UpperCAmelCase__ ( self ):
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self ):
lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*_lowercase )
def UpperCAmelCase__ ( self ):
lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*_lowercase )
def UpperCAmelCase__ ( self ):
lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*_lowercase )
def UpperCAmelCase__ ( self ):
lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*_lowercase )
def UpperCAmelCase__ ( self ):
lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*_lowercase )
def UpperCAmelCase__ ( self ):
lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*_lowercase )
def UpperCAmelCase__ ( self ):
lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*_lowercase )
@slow
def UpperCAmelCase__ ( self ):
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : Optional[Any] = FlaubertModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
@slow
@require_torch_gpu
def UpperCAmelCase__ ( self ):
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
lowerCAmelCase_ : List[Any] = True
lowerCAmelCase_ : Optional[int] = model_class(config=_lowercase )
lowerCAmelCase_ : Any = self._prepare_for_class(_lowercase , _lowercase )
lowerCAmelCase_ : Optional[Any] = torch.jit.trace(
_lowercase , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_lowercase , os.path.join(_lowercase , """traced_model.pt""" ) )
lowerCAmelCase_ : Any = torch.jit.load(os.path.join(_lowercase , """traced_model.pt""" ) , map_location=_lowercase )
loaded(inputs_dict["""input_ids"""].to(_lowercase ) , inputs_dict["""attention_mask"""].to(_lowercase ) )
@require_torch
class lowercase__ ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self ):
lowerCAmelCase_ : Tuple = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" )
lowerCAmelCase_ : List[Any] = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
with torch.no_grad():
lowerCAmelCase_ : Dict = model(_lowercase )[0]
lowerCAmelCase_ : Tuple = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , _lowercase )
lowerCAmelCase_ : Dict = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _lowercase , atol=1e-4 ) )
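# The integration test above pins a small corner of the output tensor and compares it
# within an absolute tolerance; a minimal sketch of that regression-check pattern
# (the tensor values here are made up for illustration, not taken from any model):
import torch

output = torch.tensor([[-2.6251, -1.4298, -0.0227]])
expected = torch.tensor([[-2.6250, -1.4299, -0.0226]])
assert torch.allclose(output, expected, atol=1e-4)  # max abs difference is 1e-4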
| 440 |
def _lowerCAmelCase ( _a : int , _a : bool = False ) -> bool:
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_31_70_44_06_46_79_88_73_85_96_19_81 and not allow_probable:
raise ValueError(
"""Warning: upper bound of deterministic test is exceeded. """
"""Pass allow_probable=True to allow probabilistic test. """
"""A return value of True indicates a probable prime.""" )
# array bounds provided by analysis
lowerCAmelCase_ : Optional[int] = [
20_47,
1_37_36_53,
25_32_60_01,
32_15_03_17_51,
2_15_23_02_89_87_47,
3_47_47_49_66_03_83,
3_41_55_00_71_72_83_21,
1,
3_82_51_23_05_65_46_41_30_51,
1,
1,
31_86_65_85_78_34_03_11_51_16_74_61,
3_31_70_44_06_46_79_88_73_85_96_19_81,
]
lowerCAmelCase_ : Dict = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(_a , 1 ):
if n < _p:
# then we have our last prime to check
lowerCAmelCase_ : str = primes[:idx]
break
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
    # a remaining odd component (d)
    # essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
lowerCAmelCase_ : Tuple = False
for r in range(_a ):
lowerCAmelCase_ : Tuple = pow(_a , d * 2**r , _a )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
lowerCAmelCase_ : List[Any] = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
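# Worked example of the d * 2**s decomposition above: for n = 221, n - 1 = 220 and
# 220 == 55 * 2**2, so d = 55 and s = 2. Each base is then checked with
# pow(a, d * 2**r, n) for r in range(s), exactly as in the loop above.
d, s = 220, 0
while d % 2 == 0:
    d //= 2
    s += 1
assert (d, s) == (55, 2)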
def _lowerCAmelCase ( ) -> None:
assert not miller_rabin(5_61 )
assert miller_rabin(5_63 )
# 2047
assert not miller_rabin(83_82_01 )
assert miller_rabin(83_82_07 )
# 1_373_653
assert not miller_rabin(17_31_60_01 )
assert miller_rabin(17_31_60_17 )
# 25_326_001
assert not miller_rabin(30_78_38_66_41 )
assert miller_rabin(30_78_38_66_53 )
# 3_215_031_751
assert not miller_rabin(1_71_30_45_57_48_01 )
assert miller_rabin(1_71_30_45_57_48_19 )
# 2_152_302_898_747
assert not miller_rabin(2_77_97_99_72_83_07 )
assert miller_rabin(2_77_97_99_72_83_27 )
# 3_474_749_660_383
assert not miller_rabin(1_13_85_00_23_90_94_41 )
assert miller_rabin(1_13_85_00_23_90_95_27 )
# 341_550_071_728_321
assert not miller_rabin(1_27_50_41_01_88_48_80_43_51 )
assert miller_rabin(1_27_50_41_01_88_48_80_43_91 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_96_66_46_44_58_50_77_87_79_18_67 )
assert miller_rabin(7_96_66_46_44_58_50_77_87_79_19_51 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(55_28_40_67_74_46_64_78_97_66_03_33 )
assert miller_rabin(55_28_40_67_74_46_64_78_97_66_03_59 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 440 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__magic_name__ : Tuple = logging.get_logger(__name__)
__magic_name__ : Union[str, Any] = {"""tokenizer_file""": """tokenizer.json"""}
__magic_name__ : str = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class SCREAMING_SNAKE_CASE__ (_a ):
lowercase_ : Optional[int] = VOCAB_FILES_NAMES
lowercase_ : Dict = PRETRAINED_VOCAB_FILES_MAP
lowercase_ : Union[str, Any] = ["input_ids", "attention_mask"]
lowercase_ : Optional[int] = None
def __init__( self : Union[str, Any] , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : Tuple=None , __lowerCamelCase : int=None , __lowerCamelCase : List[str]="<unk>" , __lowerCamelCase : Any="<s>" , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : str="<pad>" , __lowerCamelCase : List[Any]=False , __lowerCamelCase : Any=False , **__lowerCamelCase : Union[str, Any] , ):
"""simple docstring"""
super().__init__(
__lowerCamelCase , __lowerCamelCase , tokenizer_file=__lowerCamelCase , unk_token=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , pad_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , clean_up_tokenization_spaces=__lowerCamelCase , **__lowerCamelCase , )
lowerCAmelCase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , __lowerCamelCase ) != add_prefix_space:
lowerCAmelCase__ = getattr(__lowerCamelCase , pre_tok_state.pop('''type''' ) )
lowerCAmelCase__ = add_prefix_space
lowerCAmelCase__ = pre_tok_class(**__lowerCamelCase )
lowerCAmelCase__ = add_prefix_space
def A__ ( self : Union[str, Any] , *__lowerCamelCase : List[str] , **__lowerCamelCase : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = kwargs.get('''is_split_into_words''' , __lowerCamelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
''' pretokenized inputs.''' )
return super()._batch_encode_plus(*__lowerCamelCase , **__lowerCamelCase )
def A__ ( self : List[Any] , *__lowerCamelCase : Dict , **__lowerCamelCase : int ):
"""simple docstring"""
lowerCAmelCase__ = kwargs.get('''is_split_into_words''' , __lowerCamelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
''' pretokenized inputs.''' )
return super()._encode_plus(*__lowerCamelCase , **__lowerCamelCase )
def A__ ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ):
"""simple docstring"""
lowerCAmelCase__ = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase )
return tuple(__lowerCamelCase )
def A__ ( self : List[str] , __lowerCamelCase : "Conversation" ):
"""simple docstring"""
lowerCAmelCase__ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) + [self.eos_token_id] )
if len(__lowerCamelCase ) > self.model_max_length:
lowerCAmelCase__ = input_ids[-self.model_max_length :]
return input_ids
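# Hedged usage sketch (requires `transformers` and hub access; "bigscience/bloom-560m"
# is one of the checkpoints listed in PRETRAINED_VOCAB_FILES_MAP above). Pretokenized
# input is only accepted together with add_prefix_space=True, matching the guards in
# _batch_encode_plus and _encode_plus.
if __name__ == "__main__":
    from transformers import BloomTokenizerFast

    tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m", add_prefix_space=True)
    enc = tok(["Hello", "world"], is_split_into_words=True)  # ok because add_prefix_space=True
    print(enc["input_ids"])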
| 615 |
import logging
from transformers.configuration_utils import PretrainedConfig
__magic_name__ : Tuple = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ (_a ):
lowercase_ : Tuple = "masked_bert"
def __init__( self : List[Any] , __lowerCamelCase : Optional[Any]=3_05_22 , __lowerCamelCase : Dict=7_68 , __lowerCamelCase : Tuple=12 , __lowerCamelCase : int=12 , __lowerCamelCase : Tuple=30_72 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : Optional[int]=5_12 , __lowerCamelCase : List[Any]=2 , __lowerCamelCase : Union[str, Any]=0.02 , __lowerCamelCase : Optional[int]=1e-12 , __lowerCamelCase : Optional[int]=0 , __lowerCamelCase : List[str]="topK" , __lowerCamelCase : Union[str, Any]="constant" , __lowerCamelCase : Union[str, Any]=0.0 , **__lowerCamelCase : Tuple , ):
"""simple docstring"""
super().__init__(pad_token_id=__lowerCamelCase , **__lowerCamelCase )
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = type_vocab_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = layer_norm_eps
lowerCAmelCase__ = pruning_method
lowerCAmelCase__ = mask_init
lowerCAmelCase__ = mask_scale
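# A minimal instantiation sketch for the config above, assuming it is the
# MaskedBertConfig from the movement-pruning research project; the pruning-specific
# values shown are simply the defaults from __init__:
#
#   config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
#   assert config.pruning_method == "topK"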
| 615 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
UpperCAmelCase__ : int ={"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Tuple =["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
UpperCAmelCase__ : Tuple =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
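# The try/except guard above is the standard optional-dependency pattern; a minimal
# standalone sketch of the same idea (names here are illustrative):
try:
    import tokenizers  # optional fast backend
except ImportError:
    tokenizers = None

def require_fast_backend():
    if tokenizers is None:
        raise ImportError("Install `tokenizers` to use the fast tokenizer.")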
| 700 |
from __future__ import annotations
UpperCAmelCase__ : str =tuple[int, int, int]
UpperCAmelCase__ : str =tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
UpperCAmelCase__ : int ='''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
# -------------------------- default selection --------------------------
# rotors --------------------------
UpperCAmelCase__ : Union[str, Any] ='''EGZWVONAHDCLFQMSIPJBYUKXTR'''
UpperCAmelCase__ : Union[str, Any] ='''FOBHMDKEXQNRAULPGSJVTYICZW'''
UpperCAmelCase__ : Union[str, Any] ='''ZJXESIUQLHAVRMDOYGTNFWPBKC'''
# reflector --------------------------
UpperCAmelCase__ : Optional[Any] ={
'''A''': '''N''',
'''N''': '''A''',
'''B''': '''O''',
'''O''': '''B''',
'''C''': '''P''',
'''P''': '''C''',
'''D''': '''Q''',
'''Q''': '''D''',
'''E''': '''R''',
'''R''': '''E''',
'''F''': '''S''',
'''S''': '''F''',
'''G''': '''T''',
'''T''': '''G''',
'''H''': '''U''',
'''U''': '''H''',
'''I''': '''V''',
'''V''': '''I''',
'''J''': '''W''',
'''W''': '''J''',
'''K''': '''X''',
'''X''': '''K''',
'''L''': '''Y''',
'''Y''': '''L''',
'''M''': '''Z''',
'''Z''': '''M''',
}
# -------------------------- extra rotors --------------------------
UpperCAmelCase__ : Dict ='''RMDJXFUWGISLHVTCQNKYPBEZOA'''
UpperCAmelCase__ : Any ='''SGLCPQWZHKXAREONTFBVIYJUDM'''
UpperCAmelCase__ : List[Any] ='''HVSICLTYKQUBXDWAJZOMFGPREN'''
UpperCAmelCase__ : Tuple ='''RZWQHFMVDBKICJLNTUXAGYPSOE'''
UpperCAmelCase__ : Optional[Any] ='''LFKIJODBEGAMQPXVUHYSTCZRWN'''
UpperCAmelCase__ : Dict ='''KOAEGVDHXPQZMLFTYWJNBRCIUS'''
def _lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
# Checks if there are 3 unique rotors
if (unique_rotsel := len(set(_UpperCAmelCase ) )) < 3:
lowerCamelCase =F"""Please use 3 unique rotors (not {unique_rotsel})"""
raise Exception(_UpperCAmelCase )
# Checks if rotor positions are valid
lowerCamelCase , lowerCamelCase , lowerCamelCase =rotpos
if not 0 < rotorposa <= len(_UpperCAmelCase ):
lowerCamelCase =F"""First rotor position is not within range of 1..26 ({rotorposa}"""
raise ValueError(_UpperCAmelCase )
if not 0 < rotorposa <= len(_UpperCAmelCase ):
lowerCamelCase =F"""Second rotor position is not within range of 1..26 ({rotorposa})"""
raise ValueError(_UpperCAmelCase )
if not 0 < rotorposa <= len(_UpperCAmelCase ):
lowerCamelCase =F"""Third rotor position is not within range of 1..26 ({rotorposa})"""
raise ValueError(_UpperCAmelCase )
# Validates string and returns dict
lowerCamelCase =_plugboard(_UpperCAmelCase )
return rotpos, rotsel, pbdict
def _lowercase ( _UpperCAmelCase ) -> dict[str, str]:
    # checks that the input string
    # a) is of type string
    # b) has even length (so pairs can be made)
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowerCamelCase =F"""Plugboard setting isn't type string ({type(_UpperCAmelCase )})"""
raise TypeError(_UpperCAmelCase )
elif len(_UpperCAmelCase ) % 2 != 0:
lowerCamelCase =F"""Odd number of symbols ({len(_UpperCAmelCase )})"""
raise Exception(_UpperCAmelCase )
elif pbstring == "":
return {}
    # str.replace returns a new string, so the result must be assigned back
    pbstring = pbstring.replace(""" """ , """""" )
# Checks if all characters are unique
lowerCamelCase =set()
for i in pbstring:
if i not in abc:
lowerCamelCase =F"""'{i}' not in list of symbols"""
raise Exception(_UpperCAmelCase )
elif i in tmppbl:
lowerCamelCase =F"""Duplicate symbol ({i})"""
raise Exception(_UpperCAmelCase )
else:
tmppbl.add(_UpperCAmelCase )
del tmppbl
# Created the dictionary
lowerCamelCase ={}
for j in range(0 , len(_UpperCAmelCase ) - 1 , 2 ):
lowerCamelCase =pbstring[j + 1]
lowerCamelCase =pbstring[j]
return pb
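# Illustrative result of the pairwise insertion above: for the (uppercase) setting
# "ABCD", symbols are paired two at a time and each pair is stored in both directions:
#   {"A": "B", "B": "A", "C": "D", "D": "C"}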
def _lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = (rotora, rotora, rotora) , _UpperCAmelCase = "" , ) -> str:
lowerCamelCase =text.upper()
lowerCamelCase , lowerCamelCase , lowerCamelCase =_validator(
_UpperCAmelCase , _UpperCAmelCase , plugb.upper() )
lowerCamelCase , lowerCamelCase , lowerCamelCase =rotor_position
lowerCamelCase , lowerCamelCase , lowerCamelCase =rotor_selection
rotorposa -= 1
rotorposa -= 1
rotorposa -= 1
lowerCamelCase =[]
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
lowerCamelCase =plugboard[symbol]
# rotor ra --------------------------
lowerCamelCase =abc.index(_UpperCAmelCase ) + rotorposa
lowerCamelCase =rotora[index % len(_UpperCAmelCase )]
# rotor rb --------------------------
lowerCamelCase =abc.index(_UpperCAmelCase ) + rotorposa
lowerCamelCase =rotora[index % len(_UpperCAmelCase )]
# rotor rc --------------------------
lowerCamelCase =abc.index(_UpperCAmelCase ) + rotorposa
lowerCamelCase =rotora[index % len(_UpperCAmelCase )]
# reflector --------------------------
# this is the reason you don't need another machine to decipher
lowerCamelCase =reflector[symbol]
# 2nd rotors
lowerCamelCase =abc[rotora.index(_UpperCAmelCase ) - rotorposa]
lowerCamelCase =abc[rotora.index(_UpperCAmelCase ) - rotorposa]
lowerCamelCase =abc[rotora.index(_UpperCAmelCase ) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
lowerCamelCase =plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(_UpperCAmelCase ):
lowerCamelCase =0
rotorposa += 1
if rotorposa >= len(_UpperCAmelCase ):
lowerCamelCase =0
rotorposa += 1
if rotorposa >= len(_UpperCAmelCase ):
lowerCamelCase =0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(_UpperCAmelCase )
return "".join(_UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] ='''This is my Python script that emulates the Enigma machine from WWII.'''
UpperCAmelCase__ : List[Any] =(1, 1, 1)
UpperCAmelCase__ : Optional[Any] ='''pictures'''
UpperCAmelCase__ : Any =(rotora, rotora, rotora)
UpperCAmelCase__ : str =enigma(message, rotor_pos, rotor_sel, pb)
print('''Encrypted message:''', en)
print('''Decrypted message:''', enigma(en, rotor_pos, rotor_sel, pb))
| 269 | 0 |
"""simple docstring"""
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = '''▁'''
snake_case = {'''vocab_file''': '''prophetnet.tokenizer'''}
snake_case = {
'''vocab_file''': {
'''microsoft/xprophetnet-large-wiki100-cased''': (
'''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'''
),
}
}
snake_case = {
'''microsoft/xprophetnet-large-wiki100-cased''': {'''do_lower_case''': False},
}
snake_case = {
'''microsoft/xprophetnet-large-wiki100-cased''': 5_1_2,
}
def snake_case ( lowerCAmelCase_ ) -> List[str]:
_snake_case = collections.OrderedDict()
with open(lowerCAmelCase_ , '''r''' , encoding='''utf-8''' ) as reader:
_snake_case = reader.readlines()
for index, token in enumerate(lowerCAmelCase_ ):
_snake_case = token.rstrip('''\n''' )
_snake_case = index
return vocab
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
A__ : Optional[int] = VOCAB_FILES_NAMES
A__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
A__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Optional[int] = ['''input_ids''', '''attention_mask''']
def __init__( self : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any="[SEP]" , __lowerCamelCase : List[Any]="[SEP]" , __lowerCamelCase : Optional[Any]="[SEP]" , __lowerCamelCase : Any="[UNK]" , __lowerCamelCase : str="[PAD]" , __lowerCamelCase : Union[str, Any]="[CLS]" , __lowerCamelCase : Tuple="[MASK]" , __lowerCamelCase : Optional[Dict[str, Any]] = None , **__lowerCamelCase : List[Any] , ):
"""simple docstring"""
_snake_case = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'''
''' pip install sentencepiece''' )
raise
_snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__lowerCamelCase ) )
_snake_case = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
_snake_case = {'''[PAD]''': 0, '''[CLS]''': 1, '''[SEP]''': 2, '''[UNK]''': 3, '''[MASK]''': 4}
for i in range(1_0 ):
_snake_case = f"""[unused{i}]"""
_snake_case = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
_snake_case = 1_2
_snake_case = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(__lowerCamelCase )
def __getstate__( self : List[str] ):
"""simple docstring"""
_snake_case = self.__dict__.copy()
_snake_case = None
return state
def __setstate__( self : Optional[int] , __lowerCamelCase : Any ):
"""simple docstring"""
_snake_case = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'''
''' pip install sentencepiece''' )
raise
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_snake_case = {}
_snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCAmelCase ( self : int , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return ([0] * len(__lowerCamelCase )) + [1]
return ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1]
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
"""simple docstring"""
_snake_case = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
return len(self.sp_model ) + self.fairseq_offset
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
_snake_case = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : str ):
"""simple docstring"""
return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : Optional[Any] ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_snake_case = self.sp_model.PieceToId(__lowerCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __UpperCAmelCase ( self : str , __lowerCamelCase : Tuple ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : Optional[Any] ):
"""simple docstring"""
        _snake_case = ''''''.join(__lowerCamelCase ).replace('''▁''' , ''' ''' )  # strip the sentencepiece underline marker
return out_string
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_snake_case = os.path.join(
__lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase , '''wb''' ) as fi:
_snake_case = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
return (out_vocab_file,)
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
_snake_case = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
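# Sequence layouts produced by build_inputs_with_special_tokens above (sketch):
#   single sequence: tokens_a + [SEP]
#   sequence pair  : tokens_a + [SEP] + tokens_b + [SEP]
# create_token_type_ids_from_sequences returns all zeros for both layouts, e.g.
# ids [5, 6] and [7] give [5, 6, SEP, 7, SEP] with token_type_ids [0, 0, 0, 0, 0].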
| 103 |
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self , a ) -> Tuple:
snake_case_ = n
snake_case_ = [None] * self.n
snake_case_ = 0 # index of the first element
snake_case_ = 0
snake_case_ = 0
def __len__( self ) -> int:
return self.size
def _UpperCamelCase ( self ) -> bool:
return self.size == 0
def _UpperCamelCase ( self ) -> List[Any]:
return False if self.is_empty() else self.array[self.front]
def _UpperCamelCase ( self , a ) -> Dict:
if self.size >= self.n:
raise Exception('QUEUE IS FULL' )
snake_case_ = data
snake_case_ = (self.rear + 1) % self.n
self.size += 1
return self
def _UpperCamelCase ( self ) -> List[Any]:
if self.size == 0:
raise Exception('UNDERFLOW' )
snake_case_ = self.array[self.front]
snake_case_ = None
snake_case_ = (self.front + 1) % self.n
self.size -= 1
return temp
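# A short, runnable re-sketch of the fixed-size circular queue above with descriptive
# names (the wrap-around index arithmetic is identical):
class CircularQueue:
    def __init__(self, capacity):
        self.capacity = capacity
        self.array = [None] * capacity
        self.front = 0  # index of the first element
        self.rear = 0   # index one past the last element
        self.size = 0

    def enqueue(self, data):
        if self.size >= self.capacity:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.capacity
        self.size += 1

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.capacity
        self.size -= 1
        return temp


queue = CircularQueue(2)
queue.enqueue("a")
queue.enqueue("b")
assert queue.dequeue() == "a"
queue.enqueue("c")  # wraps around into the freed slot
assert queue.dequeue() == "b" and queue.dequeue() == "c"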
| 198 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _lowercase ( __UpperCAmelCase , unittest.TestCase ):
lowercase_ = ShapEImgaImgPipeline
lowercase_ = ['image']
lowercase_ = ['image']
lowercase_ = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
lowercase_ = False
@property
def _UpperCamelCase ( self ) -> str:
return 32
@property
def _UpperCamelCase ( self ) -> List[str]:
return 32
@property
def _UpperCamelCase ( self ) -> List[Any]:
return self.time_input_dim * 4
@property
def _UpperCamelCase ( self ) -> Any:
return 8
@property
def _UpperCamelCase ( self ) -> Dict:
torch.manual_seed(0 )
lowerCamelCase : int = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
lowerCamelCase : Union[str, Any] = CLIPVisionModel(UpperCAmelCase_ )
return model
@property
def _UpperCamelCase ( self ) -> List[Any]:
lowerCamelCase : int = CLIPImageProcessor(
crop_size=224 , do_center_crop=UpperCAmelCase_ , do_normalize=UpperCAmelCase_ , do_resize=UpperCAmelCase_ , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
@property
def _UpperCamelCase ( self ) -> Dict:
torch.manual_seed(0 )
lowerCamelCase : Dict = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'embedding_proj_norm_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
lowerCamelCase : Any = PriorTransformer(**UpperCAmelCase_ )
return model
@property
def _UpperCamelCase ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
lowerCamelCase : Dict = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
lowerCamelCase : Any = ShapERenderer(**UpperCAmelCase_ )
return model
def _UpperCamelCase ( self ) -> Optional[int]:
lowerCamelCase : Optional[int] = self.dummy_prior
lowerCamelCase : List[Any] = self.dummy_image_encoder
lowerCamelCase : int = self.dummy_image_processor
lowerCamelCase : List[Any] = self.dummy_renderer
lowerCamelCase : Dict = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=UpperCAmelCase_ , clip_sample=UpperCAmelCase_ , clip_sample_range=1.0 , )
lowerCamelCase : Union[str, Any] = {
'prior': prior,
'image_encoder': image_encoder,
'image_processor': image_processor,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def _UpperCamelCase ( self , UpperCAmelCase_ , UpperCAmelCase_=0 ) -> Optional[Any]:
lowerCamelCase : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
if str(UpperCAmelCase_ ).startswith('mps' ):
lowerCamelCase : Dict = torch.manual_seed(UpperCAmelCase_ )
else:
lowerCamelCase : Optional[int] = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
lowerCamelCase : Tuple = {
'image': input_image,
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def _UpperCamelCase ( self ) -> Any:
lowerCamelCase : Optional[Any] = 'cpu'
lowerCamelCase : List[Any] = self.get_dummy_components()
lowerCamelCase : Optional[Any] = self.pipeline_class(**UpperCAmelCase_ )
lowerCamelCase : List[Any] = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCamelCase : str = pipe(**self.get_dummy_inputs(UpperCAmelCase_ ) )
lowerCamelCase : Optional[Any] = output.images[0]
lowerCamelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
lowerCamelCase : List[Any] = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _UpperCamelCase ( self ) -> List[str]:
        # NOTE: larger batch sizes cause this test to time out, so only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def _UpperCamelCase ( self ) -> int:
lowerCamelCase : int = torch_device == 'cpu'
lowerCamelCase : Any = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=UpperCAmelCase_ , relax_max_difference=UpperCAmelCase_ , )
def _UpperCamelCase ( self ) -> Union[str, Any]:
lowerCamelCase : Union[str, Any] = self.get_dummy_components()
lowerCamelCase : Tuple = self.pipeline_class(**UpperCAmelCase_ )
lowerCamelCase : Union[str, Any] = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCamelCase : Dict = 1
lowerCamelCase : Optional[Any] = 2
lowerCamelCase : int = self.get_dummy_inputs(UpperCAmelCase_ )
for key in inputs.keys():
if key in self.batch_params:
lowerCamelCase : Optional[int] = batch_size * [inputs[key]]
lowerCamelCase : Union[str, Any] = pipe(**UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
def _UpperCamelCase ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self ) -> int:
lowerCamelCase : List[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' )
lowerCamelCase : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_img2img_out.npy' )
lowerCamelCase : List[Any] = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' )
lowerCamelCase : Any = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCamelCase : List[str] = torch.Generator(device=UpperCAmelCase_ ).manual_seed(0 )
lowerCamelCase : int = pipe(
UpperCAmelCase_ , generator=UpperCAmelCase_ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(UpperCAmelCase_ , UpperCAmelCase_ )
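# get_dummy_inputs above seeds generation per device so runs are reproducible; a
# minimal standalone sketch of that pattern with plain torch:
import torch

gen_a = torch.Generator(device="cpu").manual_seed(0)
gen_b = torch.Generator(device="cpu").manual_seed(0)
assert torch.equal(torch.randn(3, generator=gen_a), torch.randn(3, generator=gen_b))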
| 700 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'caidas/swin2sr-classicalsr-x2-64': (
'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
),
}
class _lowercase ( __UpperCAmelCase ):
lowercase_ = 'swin2sr'
lowercase_ = {
'hidden_size': 'embed_dim',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self , UpperCAmelCase_=64 , UpperCAmelCase_=1 , UpperCAmelCase_=3 , UpperCAmelCase_=180 , UpperCAmelCase_=[6, 6, 6, 6, 6, 6] , UpperCAmelCase_=[6, 6, 6, 6, 6, 6] , UpperCAmelCase_=8 , UpperCAmelCase_=2.0 , UpperCAmelCase_=True , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.1 , UpperCAmelCase_="gelu" , UpperCAmelCase_=False , UpperCAmelCase_=0.02 , UpperCAmelCase_=1E-5 , UpperCAmelCase_=2 , UpperCAmelCase_=1.0 , UpperCAmelCase_="1conv" , UpperCAmelCase_="pixelshuffle" , **UpperCAmelCase_ , ) -> Union[str, Any]:
super().__init__(**UpperCAmelCase_ )
lowerCamelCase : int = image_size
lowerCamelCase : Tuple = patch_size
lowerCamelCase : Union[str, Any] = num_channels
lowerCamelCase : List[Any] = embed_dim
lowerCamelCase : int = depths
lowerCamelCase : Any = len(UpperCAmelCase_ )
lowerCamelCase : Tuple = num_heads
lowerCamelCase : Optional[int] = window_size
lowerCamelCase : str = mlp_ratio
lowerCamelCase : Tuple = qkv_bias
lowerCamelCase : Optional[Any] = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : Optional[Any] = drop_path_rate
lowerCamelCase : Optional[int] = hidden_act
lowerCamelCase : str = use_absolute_embeddings
lowerCamelCase : Dict = layer_norm_eps
lowerCamelCase : Dict = initializer_range
lowerCamelCase : Optional[int] = upscale
lowerCamelCase : List[Any] = img_range
lowerCamelCase : Optional[Any] = resi_connection
lowerCamelCase : Union[str, Any] = upsampler
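# A minimal instantiation sketch, assuming this class is transformers' Swin2SRConfig
# (model_type "swin2sr"); upscale=4 is an illustrative override of the default above.
if __name__ == "__main__":
    from transformers import Swin2SRConfig

    config = Swin2SRConfig(upscale=4)
    assert config.upscale == 4 and config.embed_dim == 180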
| 133 | 0 |
import math
def a_ ( __lowercase : int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(__lowercase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def a_ ( __lowercase : float = 0.1 ) -> int:
_snake_case = 3
_snake_case = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(__lowercase )
j += 2
return j
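# Worked example of the corner enumeration above: for j = 3, the spiral ring of side
# length 5 has corners 13, 17, 21, 25, and range(j*j + j + 1, (j + 2) ** 2, j + 1)
# yields the three non-square corners that are tested for primality.
assert list(range(3 * 3 + 3 + 1, (3 + 2) * (3 + 2), 3 + 1)) == [13, 17, 21]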
if __name__ == "__main__":
import doctest
doctest.testmod() | 686 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
'''caidas/swin2sr-classicalsr-x2-64''': (
'''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Dict = "swin2sr"
_UpperCAmelCase : Optional[int] = {
"hidden_size": "embed_dim",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Optional[int] , lowercase : List[Any]=64 , lowercase : int=1 , lowercase : Union[str, Any]=3 , lowercase : Dict=180 , lowercase : List[Any]=[6, 6, 6, 6, 6, 6] , lowercase : Dict=[6, 6, 6, 6, 6, 6] , lowercase : List[Any]=8 , lowercase : List[str]=2.0 , lowercase : Tuple=True , lowercase : Union[str, Any]=0.0 , lowercase : Dict=0.0 , lowercase : Optional[int]=0.1 , lowercase : int="gelu" , lowercase : List[str]=False , lowercase : List[Any]=0.02 , lowercase : List[Any]=1E-5 , lowercase : Optional[int]=2 , lowercase : Tuple=1.0 , lowercase : List[Any]="1conv" , lowercase : List[Any]="pixelshuffle" , **lowercase : List[str] , ):
'''simple docstring'''
super().__init__(**lowercase )
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = embed_dim
_snake_case = depths
_snake_case = len(lowercase )
_snake_case = num_heads
_snake_case = window_size
_snake_case = mlp_ratio
_snake_case = qkv_bias
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = drop_path_rate
_snake_case = hidden_act
_snake_case = use_absolute_embeddings
_snake_case = layer_norm_eps
_snake_case = initializer_range
_snake_case = upscale
_snake_case = img_range
_snake_case = resi_connection
_snake_case = upsampler | 686 | 1 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
_lowercase : Any =logging.get_logger(__name__)
_lowercase : Union[str, Any] ={
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
_lowercase : Union[str, Any] =[
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def __UpperCAmelCase ( UpperCamelCase__ :Dict ) -> Optional[int]:
snake_case__ : Any = {}
with open(UpperCamelCase__ , '''r''' ) as file:
for line_number, line in enumerate(UpperCamelCase__ ):
snake_case__ : Optional[int] = line.strip()
if line:
snake_case__ : Tuple = line.split()
snake_case__ : List[str] = line_number
snake_case__ : Any = words[0]
snake_case__ : Tuple = value
return result
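# A one-line re-sketch of the reader above, assuming the obfuscated assignments build
# {line_number: first_word} (which is how the id2label mapping is consumed below):
def read_labels(path):
    with open(path) as f:
        return {i: line.split()[0] for i, line in enumerate(f) if line.strip()}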
def __UpperCAmelCase ( UpperCamelCase__ :Optional[Any] , UpperCamelCase__ :str , UpperCamelCase__ :Any , UpperCamelCase__ :Tuple , UpperCamelCase__ :Tuple ) -> Optional[Any]:
for attribute in key.split('''.''' ):
snake_case__ : Optional[int] = getattr(UpperCamelCase__ , UpperCamelCase__ )
snake_case__ : Optional[Any] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(UpperCamelCase__ ):
snake_case__ : Optional[Any] = PARAM_MAPPING[full_name.split('''.''' )[-1]]
snake_case__ : Optional[int] = '''param'''
if weight_type is not None and weight_type != "param":
snake_case__ : Any = getattr(UpperCamelCase__ , UpperCamelCase__ ).shape
elif weight_type is not None and weight_type == "param":
snake_case__ : Union[str, Any] = hf_pointer
for attribute in hf_param_name.split('''.''' ):
snake_case__ : Dict = getattr(UpperCamelCase__ , UpperCamelCase__ )
snake_case__ : Any = shape_pointer.shape
# let's reduce dimension
snake_case__ : Optional[Any] = value[0]
else:
snake_case__ : int = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
snake_case__ : Dict = value
elif weight_type == "weight_g":
snake_case__ : Optional[Any] = value
elif weight_type == "weight_v":
snake_case__ : Any = value
elif weight_type == "bias":
snake_case__ : Union[str, Any] = value
elif weight_type == "param":
for attribute in hf_param_name.split('''.''' ):
snake_case__ : List[str] = getattr(UpperCamelCase__ , UpperCamelCase__ )
snake_case__ : int = value
else:
snake_case__ : Any = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
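# The loop at the top of set_recursively walks dotted attribute paths with getattr;
# a minimal standalone sketch of that traversal (the example path is illustrative):
from functools import reduce

def get_nested_attr(obj, dotted):
    return reduce(getattr, dotted.split("."), obj)

# e.g. get_nested_attr(hf_model, "wav2vec2.encoder.layer_norm") resolves three levels.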
def __UpperCAmelCase ( UpperCamelCase__ :str , UpperCamelCase__ :List[str] , UpperCamelCase__ :Optional[int] , UpperCamelCase__ :Union[str, Any] , UpperCamelCase__ :Tuple ) -> int:
snake_case__ : Union[str, Any] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(UpperCamelCase__ ):
snake_case__ : List[str] = PARAM_MAPPING[full_name.split('''.''' )[-1]]
snake_case__ : str = '''param'''
if weight_type is not None and weight_type != "param":
snake_case__ : Dict = '''.'''.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
snake_case__ : Tuple = '''.'''.join([key, hf_param_name] )
else:
snake_case__ : List[str] = key
snake_case__ : str = value if '''lm_head''' in full_key else value[0]
_lowercase : Tuple ={
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def __UpperCAmelCase ( UpperCamelCase__ :List[Any] , UpperCamelCase__ :str , UpperCamelCase__ :List[str]=None , UpperCamelCase__ :Tuple=None ) -> List[Any]:
snake_case__ : str = False
for key, mapped_key in MAPPING.items():
snake_case__ : Optional[int] = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
snake_case__ : List[Any] = True
if "*" in mapped_key:
snake_case__ : List[Any] = name.split(UpperCamelCase__ )[0].split('''.''' )[-2]
snake_case__ : Optional[int] = mapped_key.replace('''*''' , UpperCamelCase__ )
if "weight_g" in name:
snake_case__ : Dict = '''weight_g'''
elif "weight_v" in name:
snake_case__ : Dict = '''weight_v'''
elif "bias" in name:
snake_case__ : Dict = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case__ : Tuple = '''weight'''
else:
snake_case__ : Optional[int] = None
if hf_dict is not None:
rename_dict(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
set_recursively(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return is_used
return is_used
def __UpperCAmelCase ( UpperCamelCase__ :Any , UpperCamelCase__ :Union[str, Any] , UpperCamelCase__ :Tuple ) -> Optional[int]:
snake_case__ : List[str] = []
snake_case__ : int = fairseq_model.state_dict()
snake_case__ : Dict = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
snake_case__ : Any = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hf_model.config.feat_extract_norm == '''group''' , )
snake_case__ : Tuple = True
else:
snake_case__ : List[str] = load_wavaveca_layer(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if not is_used:
unused_weights.append(UpperCamelCase__ )
logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
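# Illustration (hypothetical key): a fairseq name such as "conv_layers.0.0.weight"
# is parsed above into layer_id=0 and type_id=0, so its tensor is copied into
# feature_extractor.conv_layers[0].conv.weight; type_id == 2 selects the layer norm.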
@torch.no_grad()
def convert_wavaveca_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True , is_seq_class=False ):
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path )
    else:
        config = WavaVecaConfig()
    if is_seq_class:
        idalabel = read_txt_into_dict(dict_path )
        config.idalabel = idalabel
        hf_wavavec = WavaVecaForSequenceClassification(config )
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=True , return_attention_mask=True , )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , '''vocab.json''' )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict['''<pad>'''] = 0
            vocab_dict['''<s>'''] = 1
            with open(vocab_path , '''w''' , encoding='''utf-8''' ) as vocab_handle:
                json.dump(vocab_dict , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == '''layer''' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_wavavec = WavaVecaForCTC(config )
    else:
        hf_wavavec = WavaVecaForPreTraining(config )
    if is_finetuned or is_seq_class:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    else:
        task_arg = argparse.Namespace(task='''audio_pretraining''' )
        task = fairseq.tasks.setup_task(task_arg )
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=task )
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec , not is_finetuned )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
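# Example invocation (hypothetical script name and paths, shown only to
# illustrate the CLI defined above):
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path /path/to/wav2vec_small.pt \
#       --pytorch_dump_folder_path /path/to/hf_model \
#       --dict_path /path/to/dict.ltr.txt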
| 574 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _SCREAMING_SNAKE_CASE (PipelineTesterMixin , unittest.TestCase ):
A__ = UnCLIPImageVariationPipeline
A__ = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
A__ = IMAGE_VARIATION_BATCH_PARAMS
A__ = [
'generator',
'return_dict',
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
A__ = False
@property
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
return 32
@property
def lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
return 32
@property
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return self.time_input_dim
@property
def lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
return 100
@property
def lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
snake_case__ : Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def lowerCAmelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
snake_case__ : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(__UpperCamelCase )
@property
def lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
snake_case__ : Any = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(__UpperCamelCase )
@property
def lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
snake_case__ : Optional[Any] = {
'''clip_embeddings_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''cross_attention_dim''': self.cross_attention_dim,
}
snake_case__ : Tuple = UnCLIPTextProjModel(**__UpperCamelCase )
return model
@property
def lowerCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
snake_case__ : Optional[Any] = {
'''sample_size''': 32,
# RGB in channels
'''in_channels''': 3,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 6,
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': '''identity''',
}
snake_case__ : Tuple = UNetaDConditionModel(**__UpperCamelCase )
return model
@property
def lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def lowerCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
snake_case__ : Union[str, Any] = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(1 )
snake_case__ : int = UNetaDModel(**self.dummy_super_res_kwargs )
return model
    def get_dummy_components( self ):
        '''simple docstring'''
        decoder = self.dummy_decoder
        text_proj = self.dummy_text_proj
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        super_res_first = self.dummy_super_res_first
        super_res_last = self.dummy_super_res_last
        decoder_scheduler = UnCLIPScheduler(
            variance_type='''learned_range''' , prediction_type='''epsilon''' , num_train_timesteps=1000 , )
        super_res_scheduler = UnCLIPScheduler(
            variance_type='''fixed_small_log''' , prediction_type='''epsilon''' , num_train_timesteps=1000 , )
        feature_extractor = CLIPImageProcessor(crop_size=32 , size=32 )
        image_encoder = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
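    # The dict above mirrors the pipeline's constructor arguments; the tests
    # below consume it as (sketch): pipe = self.pipeline_class(**self.get_dummy_components())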
    def get_dummy_inputs( self , device , seed=0 , pil_image=True ):
        '''simple docstring'''
        input_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0 , 1 )
            input_image = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
snake_case__ : Tuple = '''cpu'''
snake_case__ : Dict = self.get_dummy_components()
snake_case__ : List[str] = self.pipeline_class(**__UpperCamelCase )
snake_case__ : str = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : Dict = self.get_dummy_inputs(__UpperCamelCase , pil_image=__UpperCamelCase )
snake_case__ : List[str] = pipe(**__UpperCamelCase )
snake_case__ : str = output.images
snake_case__ : Dict = self.get_dummy_inputs(__UpperCamelCase , pil_image=__UpperCamelCase )
snake_case__ : Any = pipe(
**__UpperCamelCase , return_dict=__UpperCamelCase , )[0]
snake_case__ : Dict = image[0, -3:, -3:, -1]
snake_case__ : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case__ : str = np.array(
[
0.9997,
0.0002,
0.9997,
0.9997,
0.9969,
0.0023,
0.9997,
0.9969,
0.9970,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
snake_case__ : int = '''cpu'''
snake_case__ : List[str] = self.get_dummy_components()
snake_case__ : Dict = self.pipeline_class(**__UpperCamelCase )
snake_case__ : Any = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : Optional[Any] = self.get_dummy_inputs(__UpperCamelCase , pil_image=__UpperCamelCase )
snake_case__ : Optional[int] = pipe(**__UpperCamelCase )
snake_case__ : str = output.images
snake_case__ : str = self.get_dummy_inputs(__UpperCamelCase , pil_image=__UpperCamelCase )
snake_case__ : List[str] = pipe(
**__UpperCamelCase , return_dict=__UpperCamelCase , )[0]
snake_case__ : Tuple = image[0, -3:, -3:, -1]
snake_case__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case__ : Optional[Any] = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
snake_case__ : Union[str, Any] = '''cpu'''
snake_case__ : Union[str, Any] = self.get_dummy_components()
snake_case__ : Dict = self.pipeline_class(**__UpperCamelCase )
snake_case__ : List[str] = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : List[Any] = self.get_dummy_inputs(__UpperCamelCase , pil_image=__UpperCamelCase )
snake_case__ : int = [
pipeline_inputs['''image'''],
pipeline_inputs['''image'''],
]
snake_case__ : List[Any] = pipe(**__UpperCamelCase )
snake_case__ : Tuple = output.images
snake_case__ : Union[str, Any] = self.get_dummy_inputs(__UpperCamelCase , pil_image=__UpperCamelCase )
snake_case__ : List[Any] = [
tuple_pipeline_inputs['''image'''],
tuple_pipeline_inputs['''image'''],
]
snake_case__ : int = pipe(
**__UpperCamelCase , return_dict=__UpperCamelCase , )[0]
snake_case__ : str = image[0, -3:, -3:, -1]
snake_case__ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
snake_case__ : int = np.array(
[
0.9997,
0.9989,
0.0008,
0.0021,
0.9960,
0.0018,
0.0014,
0.0002,
0.9933,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
snake_case__ : Any = torch.device('''cpu''' )
        class DummyScheduler:
            init_noise_sigma = 1
snake_case__ : Optional[Any] = self.get_dummy_components()
snake_case__ : List[str] = self.pipeline_class(**__UpperCamelCase )
snake_case__ : List[str] = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : List[Any] = torch.Generator(device=__UpperCamelCase ).manual_seed(0 )
snake_case__ : str = pipe.decoder.dtype
snake_case__ : Tuple = 1
snake_case__ : List[Any] = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
snake_case__ : Union[str, Any] = pipe.prepare_latents(
__UpperCamelCase , dtype=__UpperCamelCase , device=__UpperCamelCase , generator=__UpperCamelCase , latents=__UpperCamelCase , scheduler=DummyScheduler() )
snake_case__ : Optional[Any] = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
snake_case__ : Optional[int] = pipe.prepare_latents(
__UpperCamelCase , dtype=__UpperCamelCase , device=__UpperCamelCase , generator=__UpperCamelCase , latents=__UpperCamelCase , scheduler=DummyScheduler() )
snake_case__ : Optional[Any] = self.get_dummy_inputs(__UpperCamelCase , pil_image=__UpperCamelCase )
        img_out_a = pipe(
            **__UpperCamelCase , decoder_latents=__UpperCamelCase , super_res_latents=__UpperCamelCase ).images
snake_case__ : Optional[int] = self.get_dummy_inputs(__UpperCamelCase , pil_image=__UpperCamelCase )
# Don't pass image, instead pass embedding
snake_case__ : List[str] = pipeline_inputs.pop('''image''' )
snake_case__ : Dict = pipe.image_encoder(__UpperCamelCase ).image_embeds
        img_out_b = pipe(
            **__UpperCamelCase , decoder_latents=__UpperCamelCase , super_res_latents=__UpperCamelCase , image_embeddings=__UpperCamelCase , ).images
        # make sure passing image embeddings manually is identical
        assert np.abs(img_out_a - img_out_b ).max() < 1e-4
@skip_mps
def lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
snake_case__ : Optional[Any] = torch_device == '''cpu'''
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
snake_case__ : str = 1e-2
self._test_attention_slicing_forward_pass(
test_max_difference=__UpperCamelCase , expected_max_diff=__UpperCamelCase )
@skip_mps
def lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
snake_case__ : Optional[Any] = torch_device == '''cpu'''
snake_case__ : List[Any] = True
snake_case__ : int = [
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
self._test_inference_batch_single_identical(
test_max_difference=__UpperCamelCase , relax_max_difference=__UpperCamelCase , additional_params_copy_to_batched_inputs=__UpperCamelCase , )
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Optional[int] = [
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
snake_case__ : Dict = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=__UpperCamelCase , additional_params_copy_to_batched_inputs=__UpperCamelCase , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=__UpperCamelCase )
@skip_mps
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
return super().test_save_load_local()
@skip_mps
def lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE (unittest.TestCase ):
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png''' )
snake_case__ : Union[str, Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/unclip/karlo_v1_alpha_cat_variation_fp16.npy''' )
snake_case__ : Dict = UnCLIPImageVariationPipeline.from_pretrained(
'''kakaobrain/karlo-v1-alpha-image-variations''' , torch_dtype=torch.floataa )
snake_case__ : Dict = pipeline.to(__UpperCamelCase )
pipeline.set_progress_bar_config(disable=__UpperCamelCase )
snake_case__ : Dict = torch.Generator(device='''cpu''' ).manual_seed(0 )
snake_case__ : Optional[int] = pipeline(
__UpperCamelCase , generator=__UpperCamelCase , output_type='''np''' , )
snake_case__ : Optional[int] = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(__UpperCamelCase , __UpperCamelCase , 15 )
| 574 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase_ ( self : List[Any] ):
        model = TFCamembertModel.from_pretrained('''jplu/tf-camembert-base''' )
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25_543, 110, 83, 6]] , dtype=tf.intaa , )  # "J'aime le camembert !"
        output = model(input_ids )['''last_hidden_state''']
        expected_shape = tf.TensorShape((1, 10, 768) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 31 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , block_sizes=[1, 1, 2] , num_decoder_layers=1 , d_model=32 , n_head=4 , d_head=8 , d_inner=37 , hidden_act="gelu_new" , hidden_dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , max_position_embeddings=512 , type_vocab_size=3 , initializer_std=0.02 , num_labels=3 , num_choices=4 , scope=None , base=False , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std
        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.num_hidden_layers = self.num_hidden_layers + 2
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = FunnelConfig(
            vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def A_ ( self : Optional[Any] , lowercase_ : int , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : List[Any] , lowercase_ : Optional[int] , ):
snake_case_ = TFFunnelModel(config=lowercase_ )
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case_ = model(lowercase_ )
snake_case_ = [input_ids, input_mask]
snake_case_ = model(lowercase_ )
snake_case_ = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
snake_case_ = False
snake_case_ = TFFunnelModel(config=lowercase_ )
snake_case_ = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
snake_case_ = False
snake_case_ = TFFunnelModel(config=lowercase_ )
snake_case_ = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def A_ ( self : List[Any] , lowercase_ : Tuple , lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : int , lowercase_ : int , lowercase_ : List[Any] , ):
snake_case_ = TFFunnelBaseModel(config=lowercase_ )
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case_ = model(lowercase_ )
snake_case_ = [input_ids, input_mask]
snake_case_ = model(lowercase_ )
snake_case_ = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
snake_case_ = False
snake_case_ = TFFunnelBaseModel(config=lowercase_ )
snake_case_ = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
snake_case_ = False
snake_case_ = TFFunnelBaseModel(config=lowercase_ )
snake_case_ = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def A_ ( self : Any , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : Dict , lowercase_ : Union[str, Any] , ):
snake_case_ = TFFunnelForPreTraining(config=lowercase_ )
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case_ = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def A_ ( self : Any , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : str , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , ):
snake_case_ = TFFunnelForMaskedLM(config=lowercase_ )
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case_ = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A_ ( self : Dict , lowercase_ : Dict , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : Tuple , ):
snake_case_ = self.num_labels
snake_case_ = TFFunnelForSequenceClassification(config=lowercase_ )
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case_ = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A_ ( self : int , lowercase_ : Optional[int] , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : str , lowercase_ : int , ):
snake_case_ = self.num_choices
snake_case_ = TFFunnelForMultipleChoice(config=lowercase_ )
snake_case_ = tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) )
snake_case_ = tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) )
snake_case_ = tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) )
snake_case_ = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
snake_case_ = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A_ ( self : Any , lowercase_ : str , lowercase_ : Dict , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : Optional[Any] , ):
snake_case_ = self.num_labels
snake_case_ = TFFunnelForTokenClassification(config=lowercase_ )
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case_ = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A_ ( self : Optional[Any] , lowercase_ : Any , lowercase_ : Dict , lowercase_ : Any , lowercase_ : int , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Optional[Any] , ):
snake_case_ = TFFunnelForQuestionAnswering(config=lowercase_ )
snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
snake_case_ = model(lowercase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
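# Sketch of how the tester above is driven by the test classes below (mirrors
# their setUp/test methods):
#   tester = TFFunnelModelTester(self)
#   config_and_inputs = tester.prepare_config_and_inputs()
#   tester.create_and_check_model(*config_and_inputs)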
@require_tf
class a ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
snake_case_ = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
snake_case_ = (
{
"feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
"fill-mask": TFFunnelForMaskedLM,
"question-answering": TFFunnelForQuestionAnswering,
"text-classification": TFFunnelForSequenceClassification,
"token-classification": TFFunnelForTokenClassification,
"zero-shot": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case_ = False
snake_case_ = False
def A_ ( self : int ):
snake_case_ = TFFunnelModelTester(self )
snake_case_ = ConfigTester(self , config_class=lowercase_ )
def A_ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def A_ ( self : List[Any] ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def A_ ( self : List[str] ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowercase_ )
def A_ ( self : str ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase_ )
def A_ ( self : List[str] ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase_ )
def A_ ( self : List[Any] ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase_ )
@require_tf
class a ( _lowerCamelCase , unittest.TestCase ):
snake_case_ = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
snake_case_ = False
snake_case_ = False
def A_ ( self : Union[str, Any] ):
snake_case_ = TFFunnelModelTester(self , base=lowercase_ )
snake_case_ = ConfigTester(self , config_class=lowercase_ )
def A_ ( self : Dict ):
self.config_tester.run_common_tests()
def A_ ( self : List[str] ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*lowercase_ )
def A_ ( self : Union[str, Any] ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase_ )
def A_ ( self : str ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowercase_ )
| 640 | 0 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a , model_b , did_step , iteration ):
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
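# For context (a minimal sketch, not part of the test helpers): the pattern the
# tests below exercise alternates locally-accumulated and synchronized steps:
#   with accelerator.no_sync(ddp_model):  # gradients stay local to each process
#       step_model(ddp_model, ddp_input, ddp_target, accelerator)
#   step_model(ddp_model, ddp_input, ddp_target, accelerator)  # gradients sync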
def step_model(model , input , target , accelerator , do_backward=True ):
    '''simple docstring'''
    model.train()
    output = model(input )
    loss = F.mse_loss(output , target.to(output.device ) )
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss )
def get_training_setup(accelerator , sched=False ):
    '''simple docstring'''
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=80 )
    dataloader = DataLoader(dset , batch_size=16 )
    model.to(accelerator.device )
    if sched:
        opt = AdamW(params=model.parameters() , lr=1e-3 )
        ddp_opt = AdamW(params=ddp_model.parameters() , lr=1e-3 )
        sched = LambdaLR(opt , lr_lambda=lambda epoch : epoch**0.6_5 )
        ddp_sched = LambdaLR(ddp_opt , lr_lambda=lambda epoch : epoch**0.6_5 )
    # Make a copy of `model`
    if sched:
        ddp_model , ddp_opt , ddp_sched , dataloader = accelerator.prepare(ddp_model , ddp_opt , ddp_sched , dataloader )
    else:
        ddp_model , dataloader = accelerator.prepare(ddp_model , dataloader )
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def A ( lowercase ) -> str:
'''simple docstring'''
UpperCamelCase , UpperCamelCase , UpperCamelCase = get_training_setup(lowercase )
# Use a single batch
UpperCamelCase , UpperCamelCase = next(iter(lowercase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
UpperCamelCase , UpperCamelCase = accelerator.gather((ddp_input, ddp_target) )
UpperCamelCase , UpperCamelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowercase , lowercase , lowercase , lowercase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowercase ):
step_model(lowercase , lowercase , lowercase , lowercase )
else:
# Sync grads
step_model(lowercase , lowercase , lowercase , lowercase )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(lowercase , lowercase , lowercase , lowercase )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
UpperCamelCase = ddp_input[torch.randperm(len(lowercase ) )]
def A ( lowercase ) -> List[Any]:
'''simple docstring'''
UpperCamelCase , UpperCamelCase , UpperCamelCase = get_training_setup(lowercase )
# Use a single batch
UpperCamelCase , UpperCamelCase = next(iter(lowercase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
UpperCamelCase , UpperCamelCase = accelerator.gather((ddp_input, ddp_target) )
UpperCamelCase , UpperCamelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowercase , lowercase , lowercase , lowercase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowercase ):
step_model(lowercase , lowercase , lowercase , lowercase )
else:
# Sync grads
step_model(lowercase , lowercase , lowercase , lowercase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
UpperCamelCase = ddp_input[torch.randperm(len(lowercase ) )]
def A ( lowercase=False , lowercase=False ) -> str:
'''simple docstring'''
UpperCamelCase = Accelerator(
split_batches=lowercase , dispatch_batches=lowercase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
UpperCamelCase , UpperCamelCase , UpperCamelCase = get_training_setup(lowercase )
for iteration, batch in enumerate(lowercase ):
UpperCamelCase , UpperCamelCase = batch.values()
# Gather the distributed inputs and targs for the base model
UpperCamelCase , UpperCamelCase = accelerator.gather((ddp_input, ddp_target) )
UpperCamelCase , UpperCamelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowercase , lowercase , lowercase , lowercase , lowercase )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(lowercase ):
step_model(lowercase , lowercase , lowercase , lowercase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(lowercase ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
UpperCamelCase = ddp_input[torch.randperm(len(lowercase ) )]
GradientState._reset_state()
def A ( lowercase=False , lowercase=False ) -> List[str]:
'''simple docstring'''
UpperCamelCase = Accelerator(
split_batches=lowercase , dispatch_batches=lowercase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = get_training_setup(lowercase , lowercase )
for iteration, batch in enumerate(lowercase ):
UpperCamelCase , UpperCamelCase = batch.values()
# Gather the distributed inputs and targs for the base model
UpperCamelCase , UpperCamelCase = accelerator.gather((ddp_input, ddp_target) )
UpperCamelCase , UpperCamelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(lowercase , lowercase , lowercase , lowercase , lowercase )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowercase )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(lowercase ):
step_model(lowercase , lowercase , lowercase , lowercase )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n'''
UpperCamelCase = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowercase ))
if accelerator.num_processes > 1:
check_model_parameters(lowercase , lowercase , lowercase , lowercase )
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
GradientState._reset_state()
def A ( ) -> int:
'''simple docstring'''
UpperCamelCase = Accelerator()
UpperCamelCase = RegressionDataset(length=80 )
UpperCamelCase = DataLoader(lowercase , batch_size=16 )
UpperCamelCase = RegressionDataset(length=96 )
UpperCamelCase = DataLoader(lowercase , batch_size=16 )
UpperCamelCase , UpperCamelCase = accelerator.prepare(lowercase , lowercase )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(lowercase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase )
if iteration < len(lowercase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(lowercase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase )
if batch_num < len(lowercase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def A ( ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = Accelerator()
UpperCamelCase = accelerator.state
if state.local_process_index == 0:
print('**Test `accumulate` gradient accumulation with dataloader break**' )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print('**Test NOOP `no_sync` context manager**' )
test_noop_sync(lowercase )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print('**Test Distributed `no_sync` context manager**' )
test_distributed_sync(lowercase )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation, ' , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(lowercase , lowercase )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version('<' , '2.0' ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , '`split_batches=False`, `dispatch_batches=False`**' , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(lowercase , lowercase )
def A ( lowercase ) -> Any:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 3 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 100 * 2**20, 900 * 2**20] )
def test_is_small_dataset(dataset_size , input_in_memory_max_size , monkeypatch ):
    '''simple docstring'''
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , input_in_memory_max_size )
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size )
    assert result == expected
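# Quick illustration (assumed config values): with IN_MEMORY_MAX_SIZE patched
# to 500 * 2**20 bytes, is_small_dataset(400 * 2**20) is True while
# is_small_dataset(600 * 2**20) is False.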
| 3 | 1 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
TOKENIZER_CHECKPOINTS = ['gpt2']
TINY_MODEL_CHECKPOINT = 'gpt2'
if is_tf_available():
    class ModelToSave(tf.Module ):
        def __init__( self , tokenizer ):
            '''simple docstring'''
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT )
            self.model = TFGPTaLMHeadModel.from_config(config )
        @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='text' ),) )
        def serving( self , text ):
            '''simple docstring'''
            tokenized = self.tokenizer(text )
            input_ids_dense = tokenized['''input_ids'''].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0 , tf.intaa )
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense , attention_mask=input_mask )['''logits''']
            return outputs
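# Usage sketch for the module above (assumed checkpoint name): bundling the
# tokenizer and model lets both be exported in a single SavedModel, e.g.
#   module = ModelToSave(tokenizer=TFGPTaTokenizer.from_pretrained("gpt2"))
#   tf.saved_model.save(module, "saved.model", signatures={"serving_default": module.serving})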
@require_tf
@require_keras_nlp
class A__ ( unittest.TestCase ):
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        self.tokenizers = [GPTaTokenizer.from_pretrained(checkpoint ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPTaTokenizer.from_pretrained(checkpoint ) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers ) == len(self.tf_tokenizers )
        self.test_sentences = [
            '''This is a straightforward English test sentence.''',
            '''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
            '''Now we\'re going to add some Chinese: 一 二 三 一二三''',
            '''And some much more rare Chinese: 齉 堃 齉堃''',
            '''Je vais aussi écrire en français pour tester les accents''',
            '''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
        ]
        self.paired_sentences = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
    def test_output_equivalence( self ):
        '''simple docstring'''
        for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs] , return_tensors='tf' )
                tf_outputs = tf_tokenizer([test_inputs] )
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values , tf.intaa ) == tf_outputs_values ) )
@slow
    def test_graph_mode( self ):
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer )
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs )
                compiled_outputs = compiled_tokenizer(test_inputs )
                eager_outputs = tf_tokenizer(test_inputs )
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
    def test_saved_model( self ):
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer )
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
            out = model.serving(test_inputs )  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir ) / '''saved.model'''
                tf.saved_model.save(model , save_path , signatures={'serving_default': model.serving} )
                loaded_model = tf.saved_model.load(save_path )
                loaded_output = loaded_model.signatures['''serving_default'''](test_inputs )['''output_0''']
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
    def test_from_config( self ):
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
            out = tf_tokenizer(test_inputs )  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPTaTokenizer.from_config(config )
            from_config_output = model_from_config(test_inputs )
            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
    def test_padding( self ):
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 12_3123
            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
                out = tf_tokenizer(test_inputs , max_length=max_length )
                out_length = out['''input_ids'''].numpy().shape[1]
                assert out_length == max_length
| 405 |
from datetime import datetime
import requests
def download_video( url ):
    base_url = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
    video_url = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src''']
    return requests.get(video_url ).content
if __name__ == "__main__":
    url = input('Enter Video/IGTV url: ').strip()
    file_name = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(f"""Done. Video saved to disk as {file_name}.""")
| 696 | 0 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)
class SchedulerType(Enum ):
    '''simple docstring'''
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer , last_epoch=-1 ):
    return LambdaLR(optimizer , lambda _ : 1 , last_epoch=last_epoch )
def get_constant_schedule_with_warmup(optimizer , num_warmup_steps , last_epoch=-1 ):
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1.0 , num_warmup_steps ) )
        return 1.0
    return LambdaLR(optimizer , lr_lambda , last_epoch=last_epoch )
def get_piecewise_constant_schedule(optimizer , step_rules , last_epoch=-1 ):
    rules_dict = {}
    rule_list = step_rules.split(""",""" )
    for rule_str in rule_list[:-1]:
        value_str , lr_str = rule_str.split(""":""" )
        steps = int(value_str )
        lr = float(lr_str )
        rules_dict[steps] = lr
    last_lr_multiple = float(rule_list[-1] )
    def create_rules_function(rules_dict , last_lr_multiple ):
        def rule_func(steps ) -> float:
            sorted_steps = sorted(rules_dict.keys() )
            for i, sorted_step in enumerate(sorted_steps ):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple
        return rule_func
    rules_func = create_rules_function(rules_dict , last_lr_multiple )
    return LambdaLR(optimizer , rules_func , last_epoch=last_epoch )
def get_linear_schedule_with_warmup(optimizer , num_warmup_steps , num_training_steps , last_epoch=-1 ):
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        return max(
            0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_schedule_with_warmup(optimizer , num_warmup_steps , num_training_steps , num_cycles=0.5 , last_epoch=-1 ):
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(num_cycles ) * 2.0 * progress )) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer , num_warmup_steps , num_training_steps , num_cycles=1 , last_epoch=-1 ):
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        if progress >= 1.0:
            return 0.0
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles ) * progress) % 1.0) )) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def lowerCAmelCase_ ( _lowerCamelCase: int , _lowerCamelCase: Any , _lowerCamelCase: Optional[Any] , _lowerCamelCase: List[Any]=1E-7 , _lowerCamelCase: str=1.0 , _lowerCamelCase: Union[str, Any]=-1 ):
__SCREAMING_SNAKE_CASE : Tuple = optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
raise ValueError(F"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})" )
def lr_lambda(_lowerCamelCase: Tuple ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
__SCREAMING_SNAKE_CASE : List[str] = lr_init - lr_end
__SCREAMING_SNAKE_CASE : Optional[Any] = num_training_steps - num_warmup_steps
__SCREAMING_SNAKE_CASE : int = 1 - (current_step - num_warmup_steps) / decay_steps
__SCREAMING_SNAKE_CASE : Tuple = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
UpperCamelCase__ : Union[str, Any] = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def lowerCAmelCase_ ( _lowerCamelCase: Union[str, Any] , _lowerCamelCase: int , _lowerCamelCase: Union[str, Any] = None , _lowerCamelCase: Union[str, Any] = None , _lowerCamelCase: List[Any] = None , _lowerCamelCase: Tuple = 1 , _lowerCamelCase: Optional[int] = 1.0 , _lowerCamelCase: int = -1 , ):
__SCREAMING_SNAKE_CASE : Dict = SchedulerType(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(_lowerCamelCase , last_epoch=_lowerCamelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(_lowerCamelCase , step_rules=_lowerCamelCase , last_epoch=_lowerCamelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F"{name} requires `num_warmup_steps`, please provide that argument." )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(_lowerCamelCase , num_warmup_steps=_lowerCamelCase , last_epoch=_lowerCamelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F"{name} requires `num_training_steps`, please provide that argument." )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
_lowerCamelCase , num_warmup_steps=_lowerCamelCase , num_training_steps=_lowerCamelCase , num_cycles=_lowerCamelCase , last_epoch=_lowerCamelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
_lowerCamelCase , num_warmup_steps=_lowerCamelCase , num_training_steps=_lowerCamelCase , power=_lowerCamelCase , last_epoch=_lowerCamelCase , )
return schedule_func(
_lowerCamelCase , num_warmup_steps=_lowerCamelCase , num_training_steps=_lowerCamelCase , last_epoch=_lowerCamelCase ) | 710 |
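

# Usage sketch (hedged): wiring one of the schedules above to an optimizer.
# `torch` and a `model` are assumed to exist; the step counts are illustrative.
#
#   import torch
#
#   optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
#   lr_scheduler = get_scheduler(
#       "cosine", optimizer, num_warmup_steps=500, num_training_steps=10_000
#   )
#   for step in range(10_000):
#       ...  # forward/backward and optimizer.step()
#       lr_scheduler.step()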
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        """Turn the raw audio into Whisper input features."""
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        """Generate the token ids of the transcription."""
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        """Decode the generated ids back into text."""
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
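

# Usage sketch (hedged): the PipelineTool runtime loads the processor and model
# lazily on first call. Feeding one second of silence as a 16 kHz mono float
# array is an illustrative assumption, not part of the original file:
#
#   import numpy as np
#
#   tool = SpeechToTextTool()
#   transcript = tool(np.zeros(16_000, dtype=np.float32))
#   print(transcript)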
"""Open the top Google search results for a query given on the command line."""
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"User-Agent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f'https://google.com{link.get("href")}')
"""simple docstring"""
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
'artist': 'Zac Brown Band',
'genres': 'Country',
'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ',
}
@require_torch
    def test_1b_lyrics_tokenizer(self) -> None:
        """Tokenize the reference metadata with the 1b-lyrics tokenizer and compare to known ids."""
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
# fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer(self) -> None:
        """Tokenize the reference metadata with the 5b-lyrics tokenizer and compare to known ids."""
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
# fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
"""simple docstring"""
def __magic_name__ ( lowercase , lowercase ):
return int((input_a, input_a).count(0 ) == 0 )
def __magic_name__ ( ):
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
"""simple docstring"""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.14.0""", """To fix: pip install -r examples/pytorch/audio-classification/requirements.txt""")
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16_000) -> np.ndarray:
    """Randomly sample a contiguous chunk of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
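

# Example (hedged, illustrative numbers): for a 10 s clip at 16 kHz,
# `random_subsample(wav, max_length=2.0)` returns a random contiguous
# 32_000-sample window; clips shorter than the window come back unchanged.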
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`."
            )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to train from scratch."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--label_column_name` to the correct text column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        """Randomly subsample and featurize a batch of training audio."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        """Featurize a batch of evaluation audio without subsampling."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
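
# Example invocation (hedged; the dataset name and flag values are illustrative):
#
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb --dataset_config_name ks \
#       --output_dir wav2vec2-base-keyword-spotting \
#       --do_train --do_eval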
"""simple docstring"""
def __UpperCamelCase ( SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
if not all(char in "01" for char in bin_string ):
raise ValueError("Non-binary value was passed to the function" )
if not bin_string:
raise ValueError("Empty string was passed to the function" )
__snake_case = ""
while len(SCREAMING_SNAKE_CASE ) % 3 != 0:
__snake_case = "0" + bin_string
__snake_case = [
bin_string[index : index + 3]
for index in range(len(SCREAMING_SNAKE_CASE ) )
if index % 3 == 0
]
for bin_group in bin_string_in_3_list:
__snake_case = 0
for index, val in enumerate(SCREAMING_SNAKE_CASE ):
oct_val += int(2 ** (2 - index) * int(SCREAMING_SNAKE_CASE ) )
oct_string += str(SCREAMING_SNAKE_CASE )
return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
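
# Worked example (hedged): "1010011" is left-padded to "001010011" and grouped
# as 001 / 010 / 011, i.e. the octal digits 1, 2, 3:
#
#   >>> bin_to_octal("1010011")
#   '123'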
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
"""hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = "yolos"
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
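

# Usage sketch (hedged): the standard PretrainedConfig workflow applies here.
#
#   config = YolosConfig(num_detection_tokens=100, auxiliary_loss=True)
#   assert config.model_type == "yolos"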
import warnings

from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor

logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
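
# Note (hedged): with this lazy-module pattern, importing the package stays cheap;
# a symbol such as `XLMModel` is only materialized on first attribute access, e.g.
#
#   from transformers.models.xlm import XLMModel  # triggers the real import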
"""A small fully connected neural network with two hidden layers, implemented with NumPy."""
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        self.input_array = input_array

        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(
            4, 3
        )

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)
    def feedforward(self) -> numpy.ndarray:
        """Propagate the inputs through the network and return the predicted output."""
        # layer_between_input_and_first_hidden_layer is the layer connecting
        # the input nodes with the first hidden layer's nodes.
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return self.layer_between_second_hidden_layer_and_output
    def back_propagation(self) -> None:
        """Update the weights by backpropagating the prediction error."""
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )
    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        """Run `iterations` rounds of feedforward followed by backpropagation."""
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")
    def predict(self, input_arr: numpy.ndarray) -> int:
        """Predict the (binary) output for a single new input."""
        # Input values for which the prediction is to be made.
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )

        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    """Apply the sigmoid activation function."""
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    """Derivative of the sigmoid, expressed in terms of the sigmoid's own output."""
    return (value) * (1 - (value))
def example() -> int:
    """Train the network on sample data and predict the output for [1, 1, 1]."""
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input, output_array=output
    )

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
"""simple docstring"""
from collections import deque
class A__ :
'''simple docstring'''
def __init__( self: Optional[int] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: int) -> None:
"""simple docstring"""
__lowerCAmelCase : Dict = process_name # process name
__lowerCAmelCase : List[str] = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
__lowerCAmelCase : Any = arrival_time
__lowerCAmelCase : List[str] = burst_time # remaining burst time
__lowerCAmelCase : Union[str, Any] = 0 # total time of the process wait in ready queue
__lowerCAmelCase : Optional[int] = 0 # time from arrival time to completion time
class MLFQ:
    """
    MLFQ (multi level feedback queue) has several queues with different priority.
    Queues 0 to N-2 use the round robin algorithm; the last queue (N-1) uses
    first come, first served.
    """

    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()

    def calculate_sequence_of_finish_queue(self) -> list[str]:
        """Return the names of the finished processes, in completion order."""
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence
    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        """Return the waiting time of every process in the given queue."""
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        """Return the turnaround time of every process in the given queue."""
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        """Return the completion time of every process in the given queue."""
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        """Return the remaining burst time of every process in the given queue."""
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        """Accumulate the time the process has spent waiting since it last stopped."""
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        """Run FCFS on the given ready queue and return the finished processes."""
        finished: deque[Process] = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin(
        self, ready_queue: deque[Process], time_slice: int
    ) -> tuple[deque[Process], deque[Process]]:
        """Run one round-robin pass over the ready queue with the given time slice."""
        finished: deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue

        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self) -> deque[Process]:
        """Run all queues: round robin for all but the last queue, then FCFS."""
        # all queues except the last one have round robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i]
            )
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)

        return self.finish_queue
if __name__ == "__main__":
import doctest
__snake_case : List[Any] = Process('P1', 0, 53)
__snake_case : str = Process('P2', 0, 17)
__snake_case : Optional[Any] = Process('P3', 0, 68)
__snake_case : Tuple = Process('P4', 0, 24)
__snake_case : Tuple = 3
__snake_case : List[Any] = [17, 25]
__snake_case : List[str] = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])})
__snake_case : str = Process('P1', 0, 53)
__snake_case : Any = Process('P2', 0, 17)
__snake_case : str = Process('P3', 0, 68)
__snake_case : Dict = Process('P4', 0, 24)
__snake_case : Optional[int] = 3
__snake_case : Union[str, Any] = [17, 25]
__snake_case : str = deque([Pa, Pa, Pa, Pa])
__snake_case : int = MLFQ(number_of_queues, time_slices, queue, 0)
__snake_case : Optional[int] = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print completion times of processes(P1, P2, P3, P4)
print(
F"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print sequence of finished processes
print(
F"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
) | 615 |
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _lowercase ( __snake_case ,__snake_case ) -> Dict:
assert isinstance(__snake_case ,__snake_case )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" ,[False, True] )
def _lowercase ( __snake_case ,__snake_case ,__snake_case ) -> Any:
__lowerCAmelCase : Dict = tmp_path / "cache"
__lowerCAmelCase : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__lowerCAmelCase : Optional[Any] = ParquetDatasetReader(__snake_case ,cache_dir=__snake_case ,keep_in_memory=__snake_case ).read()
_check_parquet_dataset(__snake_case ,__snake_case )
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize("split" ,[None, NamedSplit("train" ), "train", "test"] )
def _lowercase ( __snake_case ,__snake_case ,__snake_case ) -> Any:
__lowerCAmelCase : List[str] = tmp_path / "cache"
__lowerCAmelCase : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__lowerCAmelCase : List[Any] = ParquetDatasetReader(__snake_case ,cache_dir=__snake_case ,split=__snake_case ).read()
_check_parquet_dataset(__snake_case ,__snake_case )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" ,[str, list] )
def _lowercase ( __snake_case ,__snake_case ,__snake_case ) -> Dict:
if issubclass(__snake_case ,__snake_case ):
__lowerCAmelCase : List[Any] = parquet_path
elif issubclass(__snake_case ,__snake_case ):
__lowerCAmelCase : List[Any] = [parquet_path]
__lowerCAmelCase : str = tmp_path / "cache"
__lowerCAmelCase : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__lowerCAmelCase : Tuple = ParquetDatasetReader(__snake_case ,cache_dir=__snake_case ).read()
_check_parquet_dataset(__snake_case ,__snake_case )
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" ,[False, True] )
def _lowercase ( __snake_case ,__snake_case ,__snake_case ) -> str:
__lowerCAmelCase : Any = tmp_path / "cache"
__lowerCAmelCase : int = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__lowerCAmelCase : str = ParquetDatasetReader(
{"train": parquet_path} ,cache_dir=__snake_case ,keep_in_memory=__snake_case ).read()
_check_parquet_datasetdict(__snake_case ,__snake_case )
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split" ,[None, NamedSplit("train" ), "train", "test"] )
def _lowercase ( __snake_case ,__snake_case ,__snake_case ) -> str:
if split:
__lowerCAmelCase : Optional[int] = {split: parquet_path}
else:
__lowerCAmelCase : str = "train"
__lowerCAmelCase : Optional[Any] = {"train": parquet_path, "test": parquet_path}
__lowerCAmelCase : Tuple = tmp_path / "cache"
__lowerCAmelCase : int = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__lowerCAmelCase : Union[str, Any] = ParquetDatasetReader(__snake_case ,cache_dir=__snake_case ).read()
_check_parquet_datasetdict(__snake_case ,__snake_case ,splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
"""YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""YolosForObjectDetection""",
"""YolosModel""",
"""YolosPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
def lowercase__(A ) ->bool:
"""simple docstring"""
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
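
    # A small illustration of why the bitwise test works: the least significant
    # bit of a two's-complement integer is 0 exactly for even values, and
    # Python's `&` keeps that property for negative ints as well.
    for n in (-4, -3, 0, 1, 2):
        print(n, n & 1 == 0)  # -4 True, -3 False, 0 True, 1 False, 2 True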
"""Scrape worldwide COVID-19 statistics from worldometers.info."""
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tanreinama/GPTSAN-2.8B-spout_is_uniform": (
        "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
    ),
}


class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        vocab_size=36000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35998,
        pad_token_id=35995,
        eos_token_id=35999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
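

if __name__ == "__main__":
    # Quick illustration of overriding hyper-parameters; the values below are
    # arbitrary examples, not recommended settings.
    config = GPTSanJapaneseConfig(d_model=512, num_switch_layers=4)
    print(config.num_layers)  # num_switch_layers + num_ext_layers -> 4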
def nor_gate(input_1: int, input_2: int) -> int:
    """Return 1 only when both inputs are 0 (logical NOR)."""
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"|    0    |    0    |   {nor_gate(0, 0)}    |")
    print(f"|    0    |    1    |   {nor_gate(0, 1)}    |")
    print(f"|    1    |    0    |   {nor_gate(1, 0)}    |")
    print(f"|    1    |    1    |   {nor_gate(1, 1)}    |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
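
    # NOR is functionally complete: NOT and OR can be built from it alone.
    # The helper names below are illustrative, not part of the module's API.
    def not_from_nor(a: int) -> int:
        return nor_gate(a, a)

    def or_from_nor(a: int, b: int) -> int:
        return not_from_nor(nor_gate(a, b))

    assert not_from_nor(0) == 1 and or_from_nor(0, 1) == 1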
"""Count negative numbers in a grid sorted in decreasing order, row- and column-wise."""


def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Check that every row and every column is sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Binary-search a decreasing array for the index of its first negative number."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
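

# A quick worked example of the search above (module-level sanity checks):
# in the decreasing array [4, 3, 1, -1, -3] the first negative sits at index 3,
# so the row holds 3 non-negative and 5 - 3 = 2 negative numbers.
assert find_negative_index([4, 3, 1, -1, -3]) == 3
assert find_negative_index([-1, -2]) == 0  # all numbers negative
assert find_negative_index([5, 4, 3]) == 3  # no negatives -> the array length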


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives with one binary search per row, shrinking the bound as we go down."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives by checking every number (O(rows * cols))."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Scan each row but stop at the first negative; the rest of the row must follow."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from scipy.stats import spearmanr
import datasets
lowerCamelCase__ : Union[str, Any] = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
lowerCamelCase__ : int = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
lowerCamelCase__ : str = R"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    def _info(self):
        """Return the metric metadata: features, description and reference URLs."""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float"""),
"""references""": datasets.Value("""float"""),
}) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""] , )
    def _compute(self, predictions, references, return_pvalue=False):
        """Compute the Spearman rank correlation (and optionally its p-value)."""
        results = spearmanr(references, predictions)
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class TestVectorAndMatrix(unittest.TestCase):
    def test_component(self) -> None:
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self) -> None:
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self) -> None:
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self) -> None:
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self) -> None:
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self) -> None:
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self) -> None:
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self) -> None:
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self) -> None:
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_matrix_mul(self) -> None:
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_matrix_change_component(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_matrix_component(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)

    def test_matrix_add(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test_matrix_sub(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self) -> None:
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n", str(square_zero_matrix(5))
        )
if __name__ == "__main__":
unittest.main()
"""Binarize ("bertarize") a fine-pruned model by materializing its weight masks."""
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pruning_method',
choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'],
type=str,
required=True,
help=(
'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'
' sigmoied_threshold = Soft movement pruning)'
),
)
parser.add_argument(
'--threshold',
type=float,
required=False,
help=(
'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'
'For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'
'Not needed for `l0`'
),
)
parser.add_argument(
'--model_name_or_path',
type=str,
required=True,
help='Folder containing the model that was previously fine-pruned',
)
parser.add_argument(
'--target_model_path',
default=None,
type=str,
required=False,
help='Folder containing the model that was previously fine-pruned',
)
    args = parser.parse_args()
main(args)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
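

# A minimal sketch of the lazy-import pattern used above, independent of
# transformers' own `_LazyModule` (which additionally handles TYPE_CHECKING,
# `__dir__`, and error reporting): swap the module in `sys.modules` for a proxy
# whose `__getattr__` performs the real import on first attribute access.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(attr)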
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_lowerCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask


class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet: UNet2DModel, scheduler: RePaintScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = _preprocess_image(image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
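

# Hedged usage sketch for the pipeline above. The checkpoint id follows the
# diffusers RePaint documentation and is assumed to be reachable on the Hub;
# loading of the input image and mask is left out for brevity.
def _repaint_example(original_image, mask_image):
    scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
    pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
    result = pipe(image=original_image, mask_image=mask_image, num_inference_steps=250)
    return result.images[0]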
from __future__ import annotations


def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Apply Ohm's law: exactly one of the three arguments must be 0, and the
    missing quantity is returned in a single-entry dict."""
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
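
    # Quick sanity checks (illustrative values):
    print(ohms_law(voltage=10, current=0, resistance=5))  # {'current': 2.0}
    print(ohms_law(voltage=0, current=2, resistance=5))  # {'voltage': 10.0}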
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Convert molarity to normality: normality = molarity * n-factor."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal gas law: pressure (atm) = nRT / V, with R = 0.0821 L*atm/(mol*K)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal gas law: volume (L) = nRT / P."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal gas law: temperature (K) = PV / (nR)."""
    return round(float((pressure * volume) / (0.0821 * moles)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
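
    # Worked example (made-up numbers): 1 mol of an ideal gas at 300 K in
    # 24.6 L exerts about 1 atm, since P = nRT / V = (1 * 0.0821 * 300) / 24.6.
    print(moles_to_pressure(volume=24.6, moles=1, temperature=300))  # 1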
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class T5TokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" T5 tokenizer backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = T5Tokenizer

    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self.extra_ids = extra_ids
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")

        return (out_vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
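

if __name__ == "__main__":
    # Hedged usage sketch: assumes network access to download the "t5-small"
    # checkpoint. Sentinel tokens mark masked spans in T5's denoising objective,
    # counting down from <extra_id_0>.
    tok = T5TokenizerFast.from_pretrained("t5-small")
    print(tok("The <extra_id_0> walks in <extra_id_1> park").input_ids)
    print(sorted(tok.get_sentinel_tokens())[:2])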
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevent project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
'''simple docstring'''
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}


class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
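

if __name__ == "__main__":
    # Hedged sketch of what the `inputs` mapping feeds into ONNX export: each
    # entry names an input tensor and its dynamic axes. Constructing the config
    # with the default task is an assumption based on the generic OnnxConfig
    # constructor.
    onnx_cfg = RobertaOnnxConfig(RobertaConfig())
    print(onnx_cfg.inputs)  # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ...])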
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)


@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        self.conv_in = nn.Conv(
            self.block_out_channels[0],
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in,
                kernel_size=(3, 3),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out,
                kernel_size=(3, 3),
                strides=(2, 2),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning: jnp.ndarray) -> jnp.ndarray:
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)

        return embedding
@flax_register_to_config
class lowerCAmelCase__ ( nn.Module ,_lowerCAmelCase ,_lowerCAmelCase ):
A = 32
A = 4
A = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
A = False
A = (320, 640, 1_280, 1_280)
A = 2
A = 8
A = None
A = 1_280
A = 0.0
A = False
A = jnp.floataa
A = True
A = 0
A = "rgb"
A = (16, 32, 96, 256)
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self) -> None:
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)
        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]
        controlnet_block = nn.Conv(
            output_channel, kernel_size=(1, 1), padding="VALID",
            kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout,
                    num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i], dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout,
                    num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype,
                )
            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding="VALID",
                    kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)
            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding="VALID",
                    kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection, dtype=self.dtype,
        )
        self.controlnet_mid_block = nn.Conv(
            mid_block_channel, kernel_size=(1, 1), padding="VALID",
            kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
        )
    def __call__(
        self,
        sample: jnp.ndarray,
        timesteps: Union[jnp.ndarray, float, int],
        encoder_hidden_states: jnp.ndarray,
        controlnet_cond: jnp.ndarray,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)
        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)
        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)
        down_block_res_samples = controlnet_down_block_res_samples
        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
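
# Illustrative only: constructing the ControlNet, initializing parameters with
# `init_weights`, and running one forward pass. Shapes follow the dummy tensors
# built in `init_weights` above.
#
#     controlnet = FlaxControlNetModel(sample_size=32)
#     params = controlnet.init_weights(jax.random.PRNGKey(0))
#     sample = jnp.zeros((1, 4, 32, 32))
#     timesteps = jnp.ones((1,), dtype=jnp.int32)
#     encoder_hidden_states = jnp.zeros((1, 77, 1280))
#     controlnet_cond = jnp.zeros((1, 3, 256, 256))
#     out = controlnet.apply({"params": params}, sample, timesteps, encoder_hidden_states, controlnet_cond)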
| 501 |
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")
    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
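
# Illustrative only: parsing a pytest summary line such as
# "1 failed, 2 passed in 1:02:03" yields (1, 2, "1:02:03").
#
#     failed, success, time_spent = handle_test_results("1 failed, 2 passed in 1:02:03")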
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
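
# Illustrative only: given pytest's short-failures report, the first non-numbered
# line after each "_ [doctest]" header is kept, keyed by the failing file:
#
#     report = "_ _ src/foo.py _ [doctest]\n404 some context line\nValueError: boom"
#     extract_first_line_failure(report)  # {"src/foo.py": "ValueError: boom"}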
class Message:
    def __init__(self, title: str, doc_test_results: Dict) -> None:
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
        # Set by `post`; `post_reply` refuses to run until a post has been made.
        self.thread_ts = None
@property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(":")
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3_600 + minutes * 60 + seconds
        hours, minutes, seconds = total_secs // 3_600, (total_secs % 3_600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
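
    # Illustrative only: "1:02:03" accumulates to 3723 seconds and renders as
    # "1h2m3s"; a bare ".5" (under a minute) takes the single-part branch and
    # renders as "0h0m0s".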
@property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F"""🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.""",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
@property
    def failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F"""There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"""
F""" {self.time}."""
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
@property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F"""The following examples had failures:\n\n\n{report}\n""",
},
}
    @property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)
    @staticmethod
    def error_out() -> None:
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )
    def post(self) -> None:
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply(self) -> None:
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]

                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
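
# Illustrative only: the `doc_test_results` mapping consumed by `Message` mixes
# global stats with one entry per category, roughly:
#
#     {
#         "failures": 1, "success": 9, "time_spent": "1:02:03, ",
#         "job_link": "https://github.com/...",
#         "API Examples": {"failed": ["test_a"], "failures": {"test_a": "ValueError: boom"}},
#     }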
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}
def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact
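
# Illustrative only: for an artifact directory containing `stats.txt` and
# `failures_short.txt`, this returns {"stats": "...", "failures_short": "..."},
# keyed by file name without extension.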
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts: Dict[str, Artifact] = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)
        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
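
# Illustrative only: every directory in the working directory becomes an Artifact,
# e.g. retrieve_available_artifacts()["doc_tests_gpu_test_reports"].paths
# -> [{"name": "doc_tests_gpu_test_reports", "path": "doc_tests_gpu_test_reports"}]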
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
| 501 | 1 |
'''simple docstring'''
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """
    Given a list of integers sorted in ascending order, return the indices of the
    two numbers that add up to `target` using the two-pointer technique.
    """
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{two_pointer([2, 7, 11, 15], 9) = }''')
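
    # Illustrative only: the two-pointer scan assumes `nums` is sorted ascending;
    # for two_pointer([2, 7, 11, 15], 9) the pointers close in from both ends and
    # return [0, 1]. On an unsorted list the result is not guaranteed.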
| 691 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)
__snake_case : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
},
"added_tokens.json": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
},
"merges_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
},
"tokenizer_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"RUCAIBox/mvp": 1024,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ) -> None:
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value) -> None:
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
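
# Illustrative only: typical use of the fast tokenizer; the checkpoint name comes
# from the pretrained map above.
#
#     tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
#     batch = tokenizer(["Summarize: ..."], return_tensors="pt")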
| 691 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : int = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : int = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Dict = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : int = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Optional[int] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Optional[int] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : int ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : str ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : List[Any] ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : List[str] ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : List[str] ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : str ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def __a(*SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Dict ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Dict = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Optional[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Any = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Optional[int] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Any = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Optional[int] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Optional[int] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : str = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Tuple = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Any = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : int = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Tuple = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : str = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Dict = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Any = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : int = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Dict = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : str = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : int = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : List[str] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Tuple = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Any:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : str = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
class lowerCAmelCase_ ( metaclass=__magic_name__ ):
__lowerCamelCase : Union[str, Any] = ["torch"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["torch"] )
| 18 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)
MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Helsinki-NLP/opus-mt-en-de''': '''https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json''',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16,
        decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0,
        use_cache=True, is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1, attention_dropout=0.0, activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100, eos_token_id=0, forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
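
    # Illustrative only: constructing a small config; the `attribute_map` above
    # aliases `hidden_size` to `d_model`.
    #
    #     config = MarianConfig(vocab_size=1000, d_model=256, encoder_layers=2, decoder_layers=2)
    #     assert config.hidden_size == 256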
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_encoder_and_decoder(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1,
        is_pair: bool = False, framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
def lowerCAmelCase ( self : Dict , __UpperCAmelCase : PreTrainedTokenizer , __UpperCAmelCase : int = -1 , __UpperCAmelCase : int = -1 , __UpperCAmelCase : bool = False , __UpperCAmelCase : Optional[TensorType] = None , ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
_A = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
else:
_A = self._generate_dummy_inputs_for_causal_lm(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
return common_inputs
def lowerCAmelCase ( self : str , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
_A = super()._flatten_past_key_values_(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
else:
_A = super(__UpperCAmelCase , self )._flatten_past_key_values_(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
@property
def lowerCAmelCase ( self : int ):
'''simple docstring'''
return 1E-4
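

# ---------------------------------------------------------------------------
# Illustrative usage sketch. `MarianOnnxConfig` is an assumption chosen for
# the example; any OnnxSeq2SeqConfigWithPast subclass exposing the methods
# above behaves the same way. Kept as comments so importing this module has
# no side effects (e.g. model downloads):
#
#   from transformers import AutoTokenizer, MarianConfig
#   from transformers.models.marian import MarianOnnxConfig
#
#   tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#   onnx_config = MarianOnnxConfig(MarianConfig(), task="seq2seq-lm")
#   dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, framework="pt")
#   # Keys include input_ids, attention_mask, decoder_input_ids, and, when
#   # use_past is enabled, a list of zero-filled past_key_values tensors.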
| 330 | 0 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs) | 708 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
_DESCRIPTION = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
_KWARGS_DESCRIPTION = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n    predictions (list of str): list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references (list of list of str): list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n    'google_bleu': google_bleu score\n\nExamples:\n    Example 1:\n        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n        ...         'interested', 'in', 'world', 'history']\n        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n        ...          'because', 'he', 'read', 'the', 'book']\n\n        >>> list_of_references = [[ref1a], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric(\"google_bleu\")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results[\"google_bleu\"], 2))\n        0.44\n\n    Example 2:\n        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n        ...          'heed', 'the', 'cat', 'commands']\n        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n        ...          'of', 'the', 'cat']\n\n        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n        ...         'interested', 'in', 'world', 'history']\n        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n        ...          'because', 'he', 'read', 'the', 'book']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric(\"google_bleu\")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results[\"google_bleu\"], 2))\n        0.61\n\n    Example 3:\n        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n        ...          'heed', 'the', 'cat', 'commands']\n        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n        ...          'of', 'the', 'cat']\n\n        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n        ...         'interested', 'in', 'world', 'history']\n        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n        ...          'because', 'he', 'read', 'the', 'book']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric(\"google_bleu\")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n        >>> print(round(results[\"google_bleu\"], 2))\n        0.53\n\n    Example 4:\n        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n        ...          'heed', 'the', 'cat', 'commands']\n        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n        ...          'of', 'the', 'cat']\n\n        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n        ...         'interested', 'in', 'world', 'history']\n        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n        ...          'because', 'he', 'read', 'the', 'book']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric(\"google_bleu\")\n        >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n        >>> print(round(results[\"google_bleu\"], 2))\n        0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
} | 26 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
| 48 |
from math import factorial, radians
def maclaurin_sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate sin(angle_in_degrees) with the Maclaurin series of sine."""
    # Reduce the angle to the range [0, 360) degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
if __name__ == "__main__":
__import__("doctest").testmod() | 241 | 0 |
'''simple docstring'''
def base16_encode(data: bytes) -> str:
    # Turn every byte into its two-digit uppercase hexadecimal representation.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
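

# A short usage sketch of the two helpers above (values in the comments are
# what the calls actually return):
if __name__ == "__main__":
    encoded = base16_encode(b"Hello World!")
    print(encoded)  # 48656C6C6F20576F726C6421
    assert base16_decode(encoded) == b"Hello World!"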
| 718 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 542 | 0 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 33 |
"""simple docstring"""
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert `value` between any two of the volume units in METRIC_CONVERSION."""
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
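

# A short usage sketch: the conversion goes source unit -> cubic metres ->
# target unit, so 4 litres is 4 * 0.001 * 1 = 0.004 cubic metres.
if __name__ == "__main__":
    print(volume_conversion(4, "litre", "cubicmeter"))  # 0.004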
| 535 | 0 |
"""simple docstring"""
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(4_2)
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}

ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]


def custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)

    def do_checks(self, output_dir):
        # run_asr does not save any results, so all we check for now is that the process did not fail
        pass

    def run_and_check(
        self,
        stage: str,
        model: str,
        eval_steps: int = 10,
        distributed: bool = True,
        quality_checks: bool = True,
        fp16: bool = True,
    ):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            distributed=distributed,
            fp16=fp16,
        )
        self.do_checks(output_dir)
        return output_dir

    def run_trainer(
        self,
        stage: str,
        model_name: str,
        eval_steps: int = 10,
        num_train_epochs: int = 1,
        distributed: bool = True,
        fp16: bool = True,
    ):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=True)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        """.split()

        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    def get_launcher(self, distributed=False):
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
| 716 |
"""simple docstring"""
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    # Swap the two elements if they are out of order for the given direction
    # (1 = ascending, 0 = descending).
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    # Recursively merge a bitonic sequence into a monotonic one.
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    # Sort the two halves in opposite directions, producing a bitonic
    # sequence, then merge it in the requested direction.
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
snake_case = input('''Enter numbers separated by a comma:\n''').strip()
snake_case = [int(item.strip()) for item in user_input.split(''',''')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('''\nSorted array in ascending order is: ''', end='''''')
print(*unsorted, sep=''', ''')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('''Sorted array in descending order is: ''', end='''''')
print(*unsorted, sep=''', ''')
| 404 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4) | 51 |
"""simple docstring"""
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Count how many times `term` occurs in `document`, ignoring punctuation and case."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)

    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    return round(tf * idf, 3)
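

# A short usage sketch tying the three steps together on a toy corpus:
if __name__ == "__main__":
    corpus = "the cat sat\nthe dog sat\nthe cat ran"
    tf = term_frequency("cat", "the cat sat with another cat")  # -> 2
    df, n = document_frequency("cat", corpus)  # -> (2, 3)
    idf = inverse_document_frequency(df, n)  # -> round(log10(3 / 2), 3) = 0.176
    print(tf_idf(tf, idf))  # -> 0.352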
| 584 | 0 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
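

# Usage sketch for the pipeline above, kept as comments so importing this
# module stays side-effect free (the checkpoint and image URL are examples):
#
#   from transformers import pipeline
#
#   classifier = pipeline(
#       "zero-shot-image-classification", model="openai/clip-vit-base-patch32"
#   )
#   preds = classifier(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["a photo of a cat", "a photo of a dog"],
#   )
#   # -> [{"score": 0.99..., "label": "a photo of a cat"}, ...]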
| 309 |
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Flip two qubits with X gates and measure them in the computational basis."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"Total count for various states are: {counts}")
| 309 | 1 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(text):
        return tok(text, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
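
# Example invocation (the data paths below are hypothetical placeholders;
# `data_dir` must contain {train,val,test}.{source,target} files):
#
#   python pack_dataset.py --tok_name facebook/bart-large-cnn \
#       --max_seq_len 256 --data_dir ./cnn_dm --save_path ./cnn_dm_packed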
| 101 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_blip_2": [
        "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Blip2Config",
        "Blip2QFormerConfig",
        "Blip2VisionConfig",
    ],
    "processing_blip_2": ["Blip2Processor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip_2"] = [
        "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Blip2Model",
        "Blip2QFormerModel",
        "Blip2PreTrainedModel",
        "Blip2ForConditionalGeneration",
        "Blip2VisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 101 | 1 |
"""Finetuning multi-lingual models on XNLI (e.g. Bert, DistilBERT, XLM)."""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
snake_case_ : str = logging.getLogger(__name__)
@dataclass
class __a :
__a : Optional[int] = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__a : bool = field(
default=lowerCamelCase , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
__a : bool = field(
default=lowerCamelCase , metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} , )
__a : Optional[int] = field(
default=lowerCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
__a : Optional[int] = field(
default=lowerCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
__a : Optional[int] = field(
default=lowerCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
} , )
@dataclass
class __a :
__a : str = field(
default=lowerCamelCase , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
__a : str = field(
default=lowerCamelCase , metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."} )
__a : Optional[str] = field(
default=lowerCamelCase , metadata={"help": "Train language if it is different from the evaluation language."} )
__a : Optional[str] = field(
default=lowerCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__a : Optional[str] = field(
default=lowerCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
__a : Optional[str] = field(
default=lowerCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__a : Optional[bool] = field(
default=lowerCamelCase , metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"} , )
__a : bool = field(
default=lowerCamelCase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
__a : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
__a : bool = field(
default=lowerCamelCase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
__a : bool = field(
default=lowerCamelCase , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def lowerCamelCase_ ( ) -> Union[str, Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCAmelCase_ : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_xnli''', SCREAMING_SNAKE_CASE__ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', handlers=[logging.StreamHandler(sys.stdout )], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCAmelCase_ : Optional[Any] = training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE__ )
datasets.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE__ )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
UpperCAmelCase_ : Optional[int] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase_ : Dict = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                '''xnli''', model_args.language, split='''train''', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        else:
            train_dataset = load_dataset(
                '''xnli''', model_args.train_language, split='''train''', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        label_list = train_dataset.features['''label'''].names
    if training_args.do_eval:
        eval_dataset = load_dataset(
            '''xnli''', model_args.language, split='''validation''', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        label_list = eval_dataset.features['''label'''].names
    if training_args.do_predict:
        predict_dataset = load_dataset(
            '''xnli''', model_args.language, split='''test''', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        label_list = predict_dataset.features['''label'''].names

    # Labels
    num_labels = len(label_list)
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label={str(i): label for i, label in enumerate(label_list)}, label2id={label: i for i, label in enumerate(label_list)}, finetuning_task='''xnli''', cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, do_lower_case=model_args.do_lower_case, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path ), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
# Preprocessing the datasets
# Padding strategy
    if data_args.pad_to_max_length:
        padding = '''max_length'''
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples['''premise'''], examples['''hypothesis'''], padding=padding, max_length=data_args.max_seq_length, truncation=True, )
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc='''Running tokenizer on train dataset''', )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(F"""Sample {index} of the training set: {train_dataset[index]}.""" )
    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc='''Running tokenizer on validation dataset''', )
    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc='''prediction dataset map pre-processing''' ):
            predict_dataset = predict_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc='''Running tokenizer on prediction dataset''', )
# Get the metric function
    metric = evaluate.load('''xnli''' )
    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics['''train_samples'''] = min(max_train_samples, len(train_dataset))
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics('''train''', metrics)
        trainer.save_metrics('''train''', metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics['''eval_samples'''] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics('''eval''', metrics)
        trainer.save_metrics('''eval''', metrics)
# Prediction
    if training_args.do_predict:
        logger.info('''*** Predict ***''' )
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix='''predict''' )
        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics['''predict_samples'''] = min(max_predict_samples, len(predict_dataset))
        trainer.log_metrics('''predict''', metrics)
        trainer.save_metrics('''predict''', metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, '''predictions.txt''' )
        if trainer.is_world_process_zero():
            with open(output_predict_file, '''w''' ) as writer:
                writer.write('''index\tprediction\n''' )
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(F"""{index}\t{item}\n""" )
if __name__ == "__main__":
main()
| 644 |
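The training script above ultimately delegates XNLI scoring to the `xnli` metric. As a quick, self-contained illustration of what its `compute_metrics` callback does (the logits and labels below are made up for the example):

import numpy as np
import evaluate

metric = evaluate.load("xnli")  # reports accuracy for XNLI
fake_logits = np.array([[0.1, 0.8, 0.1], [0.7, 0.2, 0.1]])  # pretend model outputs
preds = np.argmax(fake_logits, axis=1)
print(metric.compute(predictions=preds, references=[1, 0]))  # {'accuracy': 1.0}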
'''simple docstring'''
def twos_complement(number: int) -> str:
    if number > 0:
        raise ValueError('''input must be a negative integer''' )
    binary_number_length = len(bin(number)[3:] )
    twos_complement_number = bin(abs(number) - (1 << binary_number_length) )[3:]
    twos_complement_number = (
        (
            '''1'''
            + '''0''' * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else '''0'''
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 644 | 1 |
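A few quick checks for the twos_complement helper above; the expected strings follow the minimal-width two's-complement convention the function implements:

assert twos_complement(0) == "0b0"
assert twos_complement(-1) == "0b11"
assert twos_complement(-5) == "0b1011"
assert twos_complement(-17) == "0b101111"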
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}" )
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}" )
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 671 |
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n]"""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
| 671 | 1 |
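A minimal FilterType implementation to exercise the plotting helpers above: a two-tap moving-average (first-order low-pass) filter. The class is illustrative, not part of the original module:

class MovingAverageFilter:
    """Two-tap moving average: y[n] = 0.5 * (x[n] + x[n-1])."""

    def __init__(self) -> None:
        self.prev_sample = 0.0

    def process(self, sample: float) -> float:
        result = 0.5 * (sample + self.prev_sample)
        self.prev_sample = sample
        return result


# show_frequency_response(MovingAverageFilter(), 48000)  # opens a matplotlib window
# show_phase_response(MovingAverageFilter(), 48000)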
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["""small""", """medium""", """large"""]

OLD_KEY = """lm_head.decoder.weight"""
NEW_KEY = """lm_head.weight"""


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    """Rename the LM head key in a DialoGPT fine-tuned checkpoint and save it
    under the standard transformers weights file name."""
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, F'''{MODEL}_ft.pkl''')
        pytorch_dump_folder_path = F'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
) | 382 |
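A quick way to confirm the rename performed by convert_dialogpt_checkpoint is to reload the saved state dict; the path below is hypothetical and assumes the script ran with its defaults:

import torch

sd = torch.load("./DialoGPT-small/pytorch_model.bin", map_location="cpu")
assert "lm_head.weight" in sd
assert "lm_head.decoder.weight" not in sd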
from math import sqrt
def solution(limit: int = 1000000) -> int:
    """
    Return the least value of M such that more than `limit` cuboids with side
    lengths 1 <= a, b, c <= M have an integral shortest surface route between
    opposite corners (Project Euler 86).
    """
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
                num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
                    - max(1 , sum_shortest_sides - max_cuboid_size )
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(F'''{solution() = }''') | 382 | 1 |
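The counting step above relies on unfolding each cuboid a x b x c (with a + b = s and longest side m) so that the shortest surface route has length sqrt(s^2 + m^2). A tiny check of that identity:

from math import sqrt

m, s = 6, 8  # e.g. a 3 x 5 x 6 cuboid: a + b = 8, giving sqrt(8**2 + 6**2) == 10
assert sqrt(s**2 + m**2).is_integer()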
def jaro_winkler(str1: str, str2: str) -> float:
    """Compute the Jaro-Winkler similarity of two strings."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"

        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
| 412 |
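Two more reference values for jaro_winkler beyond the hello/world pair printed above; 'martha'/'marhta' is the classic textbook example (one transposition, three-character common prefix):

print(round(jaro_winkler("martha", "marhta"), 4))  # 0.9611
print(jaro_winkler("hello", "hello"))              # 1.0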
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__( self , parent , batch_size=13 , image_size=10 , num_channels=3 , patch_size=2 , tubelet_size=2 , num_frames=2 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , mask_ratio=0.9 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame

        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length )

    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )

        config = self.get_config()

        return config, pixel_values, labels

    def get_config( self ):
        return VideoMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = VideoMAEModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_pretraining( self , config , pixel_values , labels ):
        model = VideoMAEForPreTraining(config )
        model.to(torch_device )
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,) )
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
        bool_masked_pos = mask.expand(self.batch_size , -1 ).bool()

        result = model(pixel_values , bool_masked_pos )
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {'feature-extraction': VideoMAEModel, 'video-classification': VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp( self ):
        self.model_tester = VideoMAEModelTester(self )
        self.config_tester = ConfigTester(self , config_class=VideoMAEConfig , has_text_modality=False , hidden_size=37 )

    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = copy.deepcopy(inputs_dict )

        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,) )
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
            bool_masked_pos = mask.expand(self.model_tester.batch_size , -1 ).bool()
            inputs_dict["""bool_masked_pos"""] = bool_masked_pos.to(torch_device )

        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING ),
            ]:
                inputs_dict["""labels"""] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )

        return inputs_dict
    def test_config( self ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""VideoMAE does not use inputs_embeds""" )
    def test_inputs_embeds( self ):
        pass

    def test_model_common_attributes( self ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )

    def test_forward_signature( self ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_pretraining( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_attention_outputs( self ):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )

                inputs_dict["""output_attentions"""] = True
                inputs_dict["""output_hidden_states"""] = False
                config.return_dict = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                attentions = outputs.attentions
                self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                attentions = outputs.attentions
                self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )

                self.assertListEqual(
                    list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
                out_len = len(outputs )

                # Check attention is always last and order is fine
                inputs_dict["""output_attentions"""] = True
                inputs_dict["""output_hidden_states"""] = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                self.assertEqual(out_len + 1 , len(outputs ) )

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions ) , self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states ) , expected_num_layers )

            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict , config , model_class )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def test_model_is_small( self ):
        pass
def prepare_video():
    file = hf_hub_download(
        repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
    video = np.load(file )
    return list(video )
@require_torch
@require_vision
class VideoMAEModelIntegrationTest( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification( self ):
        model = VideoMAEForVideoClassification.from_pretrained("""MCG-NJU/videomae-base-finetuned-kinetics""" ).to(
            torch_device )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video , return_tensors="""pt""" ).to(torch_device )

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )

        # verify the logits
        expected_shape = torch.Size((1, 400) )
        self.assertEqual(outputs.logits.shape , expected_shape )

        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )

    @slow
    def test_inference_pretraining( self ):
        model = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" ).to(torch_device )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video , return_tensors="""pt""" ).to(torch_device )

        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" )
        inputs["""bool_masked_pos"""] = torch.load(local_path )

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )

        # verify the logits
        expected_shape = torch.Size([1, 1_408, 1_536] )
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] , device=torch_device )
        self.assertEqual(outputs.logits.shape , expected_shape )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice , atol=1e-4 ) )

        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142] , device=torch_device )
        self.assertTrue(torch.allclose(outputs.loss , expected_loss , atol=1e-4 ) )

        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" , norm_pix_loss=False ).to(
            torch_device )
        with torch.no_grad():
            outputs = model(**inputs )
        expected_loss = torch.tensor(torch.tensor([0.6469] ) , device=torch_device )
        self.assertTrue(torch.allclose(outputs.loss , expected_loss , atol=1e-4 ) )
| 412 | 1 |
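The pretraining tests above build one shared boolean mask per batch. A standalone sketch of that convention (the sizes are illustrative, not the model's real ones):

import torch

seq_length, num_masks, batch_size = 196, 176, 2
mask = torch.cat([torch.ones(num_masks), torch.zeros(seq_length - num_masks)])
bool_masked_pos = mask.expand(batch_size, -1).bool()
assert bool_masked_pos.shape == (batch_size, seq_length)
assert int(bool_masked_pos[0].sum()) == num_masks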
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class LlamaConfig(PretrainedConfig):
    model_type = """llama"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    def __init__( self , vocab_size=3_2_0_0_0 , hidden_size=4_0_9_6 , intermediate_size=1_1_0_0_8 , num_hidden_layers=3_2 , num_attention_heads=3_2 , num_key_value_heads=None , hidden_act="silu" , max_position_embeddings=2_0_4_8 , initializer_range=0.02 , rms_norm_eps=1e-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , pretraining_tp=1 , tie_word_embeddings=False , rope_scaling=None , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads

        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )
    def _rope_scaling_validation( self ):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                f"""got {self.rope_scaling}""" )
        rope_scaling_type = self.rope_scaling.get('type' , None )
        rope_scaling_factor = self.rope_scaling.get('factor' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
| 712 |
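A hedged usage sketch of the rope_scaling validation above: a well-formed dict carries exactly a `type` in {'linear', 'dynamic'} and a float `factor` greater than 1.

config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes validation

try:
    LlamaConfig(rope_scaling={"type": "linear", "factor": 0.5})
except ValueError as e:
    print(e)  # factor field must be a float > 1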
def longest_common_substring(text1: str, text2: str) -> str:
    if not (isinstance(text1, str ) and isinstance(text2, str )):
        raise ValueError('longest_common_substring() takes two strings for inputs' )

    text1_length = len(text1 )
    text2_length = len(text2 )

    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1 )]
    ans_index = 0
    ans_length = 0

    for i in range(1, text1_length + 1 ):
        for j in range(1, text2_length + 1 ):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]

    return text1[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 421 | 0 |
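Example runs of the O(len1 * len2) dynamic program above:

assert longest_common_substring("abcdxyz", "xyzabcd") == "abcd"
assert longest_common_substring("zxabcdezy", "yzabcdezx") == "abcdez"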
"""simple docstring"""
from jiwer import compute_measures
import datasets
_CITATION = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
_DESCRIPTION = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""
_KWARGS_DESCRIPTION = """
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> wer = datasets.load_metric(\"wer\")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""" , id="""sequence"""),
                    """references""": datasets.Value("""string""" , id="""sequence"""),
                }) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
                """https://en.wikipedia.org/wiki/Word_error_rate""",
            ] , )

    def _compute(self , predictions=None , references=None , concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references , predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions , references):
                measures = compute_measures(reference , prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]

            return incorrect / total
| 88 |
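Hand-checking the WER formula quoted in the docstring above against jiwer's counts; the two sentences differ in exactly one of four words:

from jiwer import compute_measures

m = compute_measures("this is the reference", "this is the prediction")
wer = (m["substitutions"] + m["deletions"] + m["insertions"]) / (
    m["substitutions"] + m["deletions"] + m["hits"]
)
print(wer)  # 0.25: one substituted word out of four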
'''simple docstring'''
cache = {}


def _calculate(days: int, absent: int, late: int) -> int:
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
    key = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
    state_late = _calculate(days - 1 , absent , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
    state_absent = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
    state_ontime = _calculate(days - 1 , absent , 0 )

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
return prizestrings
def solution(days: int = 30) -> int:
    return _calculate(days , absent=0 , late=0 )
if __name__ == "__main__":
print(solution())
| 507 | 0 |
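Small sanity values for solution above; the three-day count can be verified by hand (27 raw strings, minus the 7 containing two or more absences, minus the single 'LLL' string):

assert solution(1) == 3
assert solution(2) == 8
assert solution(3) == 19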
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def list_truncated_nums(n: int) -> list[int]:
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num) ):
        list_nums.append(int(str_num[i:] ) )
        list_nums.append(int(str_num[:-i] ) )
    return list_nums
def validate(n: int) -> bool:
    if len(str(n) ) > 3:
        if not is_prime(int(str(n)[-3:] ) ) or not is_prime(int(str(n)[:3] ) ):
            return False
    return True
def compute_truncated_primes(count: int = 11) -> list[int]:
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums ):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    return sum(compute_truncated_primes(11) )
if __name__ == "__main__":
print(f"""{sum(compute_truncated_primes(11)) = }""")
| 667 |
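The canonical example from the problem statement: 3797 remains prime while digits are removed from either side (797, 97, 7 and 379, 37, 3):

assert is_prime(3797)
assert validate(3797)
assert all(is_prime(n) for n in list_truncated_nums(3797))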
'''simple docstring'''
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict = {}
def next_term(a_i, k, i, n):
    # write a(i) = b * 10^k + c; ds_b is digitsum(b), c the low digit block
    ds_b = sum(a_i[j] for j in range(k, len(a_i) ) )
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k) ) )

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1 ):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i) ) ):
                    new_c, a_i[j] = divmod(new_c, 10 )
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k) )
    return (diff, dn)
def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i) )] )

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i) ):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10 )
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i
def add(digits, k, addend):
    # adds addend to the digit array `digits`, starting at index k
    for j in range(k, len(digits) ):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10 )
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10 )
        digits.append(digit)
def solution(n: int = 10**15) -> int:
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped

        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits) ):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
print(f"""{solution() = }""")
| 667 | 1 |
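What solution accelerates is the plain recurrence a(n) = a(n-1) + digitsum(a(n-1)) with a(1) = 1. A naive reference implementation for small n:

def naive_terms(count: int) -> list[int]:
    terms = [1]
    while len(terms) < count:
        terms.append(terms[-1] + sum(int(d) for d in str(terms[-1])))
    return terms


print(naive_terms(8))  # [1, 2, 4, 8, 16, 23, 28, 38]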
"""simple docstring"""
def odd_even_sort(input_list: list) -> list:
    '''Sort the list in place using odd-even transposition (brick) sort.'''
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True

        for i in range(0 , len(input_list) - 1 , 2 ):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i] , input_list[i + 1] = input_list[i + 1] , input_list[i]
                # swapping if elements not in order
                is_sorted = False

        for i in range(1 , len(input_list) - 1 , 2 ):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i] , input_list[i + 1] = input_list[i + 1] , input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
if __name__ == "__main__":
print('''Enter list to be sorted''')
    input_list = [int(x) for x in input().split()]
    # inputting elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print('''The sorted list is''')
    print(sorted_list)
| 7 |
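Usage example; odd-even sort mutates the list in place and returns it. Its independent even-index and odd-index passes are what make the algorithm attractive on parallel hardware:

print(odd_even_sort([5, 3, 1, 4, 2]))  # [1, 2, 3, 4, 5]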
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest( TokenizerTesterMixin , unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp( self ):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''' )
        tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=False )
        self.tokenizer = tokenizer

    def test_convert_token_and_id( self ):
        '''Test ``_convert_token_to_id`` and ``_convert_id_to_token``.'''
        token = '''<pad>'''
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )

    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , '''<s>''' )
        self.assertEqual(vocab_keys[1] , '''<pad>''' )
        self.assertEqual(vocab_keys[-1] , '''<mask>''' )
        self.assertEqual(len(vocab_keys ) , 10_1122 )

    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )

    @require_torch
    def test_prepare_batch( self ):
        src_text = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        expected_src_tokens = [0, 57, 3018, 7_0307, 91, 2]

        batch = self.tokenizer(
            src_text , max_length=len(expected_src_tokens ) , padding=True , truncation=True , return_tensors='''pt''' )
        self.assertIsInstance(batch , BatchEncoding )

        self.assertEqual((2, 6) , batch.input_ids.shape )
        self.assertEqual((2, 6) , batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens , result )

    def test_rust_and_python_full_tokenizers( self ):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = '''I was born in 92000, and this is falsé.'''

        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )

        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )

    @slow
    def test_tokenizer_integration( self ):
        # fmt: off
__snake_case = {'''input_ids''': [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        expected_encoding = __snake_case  # the dict literal above keeps its original binding name

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            '''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
            '''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
            '''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
            '''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
            '''telles que la traduction et la synthèse de texte.''',
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name='''moussaKam/mbarthez''' , revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' , sequences=sequences , )
| 24 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1( self ):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )

        sd_pipe.set_scheduler('''sample_euler''' )

        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def test_stable_diffusion_2( self ):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )

        sd_pipe.set_scheduler('''sample_euler''' )

        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1

    def test_stable_diffusion_karras_sigmas( self ):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )

        sd_pipe.set_scheduler('''sample_dpmpp_2m''' )

        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0 )
        output = sd_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=True , )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.1138_1689, 0.1211_2921, 0.138_9457, 0.1254_9606, 0.124_4964, 0.1083_1517, 0.1156_2866, 0.1086_7816, 0.1049_9048] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 705 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    """simple docstring"""
    return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )


def test_gather(state):
    """simple docstring"""
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )


def test_gather_object(state):
    """simple docstring"""
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj ) == state.num_processes, F"""{gathered_obj}, {len(gathered_obj )} != {state.num_processes}"""
    assert gathered_obj == list(range(state.num_processes ) ), F"""{gathered_obj} != {list(range(state.num_processes ) )}"""


def test_broadcast(state):
    """simple docstring"""
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
    assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )


def test_pad_across_processes(state):
    """simple docstring"""
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1 ).to(state.device )
    else:
        tensor = torch.arange(state.num_processes ).to(state.device )
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]


def test_reduce_sum(state):
    """simple docstring"""
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor , '''sum''' )
    truth_tensor = torch.tensor([4.0, 6] ).to(state.device )
    assert torch.allclose(reduced_tensor , truth_tensor ), F"""{reduced_tensor} != {truth_tensor}"""


def test_reduce_mean(state):
    """simple docstring"""
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor , '''mean''' )
    truth_tensor = torch.tensor([2.0, 3] ).to(state.device )
    assert torch.allclose(reduced_tensor , truth_tensor ), F"""{reduced_tensor} != {truth_tensor}"""


def _mp_fn(index):
    """simple docstring"""
    # For xla_spawn (TPUs)
    main()


def main():
    """simple docstring"""
    state = PartialState()
    state.print(F"""State: {state}""" )
    state.print('''testing gather''' )
    test_gather(state)
    state.print('''testing gather_object''' )
    test_gather_object(state)
    state.print('''testing broadcast''' )
    test_broadcast(state)
    state.print('''testing pad_across_processes''' )
    test_pad_across_processes(state)
    state.print('''testing reduce_sum''' )
    test_reduce_sum(state)
    state.print('''testing reduce_mean''' )
    test_reduce_mean(state)
if __name__ == "__main__":
main()
| 418 | 0 |
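The checks above are intended to run once per process. A typical multi-process invocation goes through the accelerate CLI (the file name below is hypothetical), while plain python gives a single-process run for debugging:

# accelerate launch test_operations.py   # one process per configured device
# python test_operations.py              # single process

from accelerate import PartialState

state = PartialState()
print(state.num_processes, state.process_index)  # 1 0 when run with plain python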
def lucas_lehmer_test(p: int) -> bool:
    """Lucas-Lehmer primality test for the Mersenne number 2**p - 1."""
    if p < 2:
        raise ValueError("p should not be less than 2!" )
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11)) | 249 |
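# Hedged usage sketch for lucas_lehmer_test above: the Mersenne-prime exponents
# below 20 (2**11 - 1 = 2047 = 23 * 89, so 11 is correctly rejected).
mersenne_exponents = [p for p in range(2, 20) if lucas_lehmer_test(p)]
assert mersenne_exponents == [2, 3, 5, 7, 13, 17, 19]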
import argparse
from t5x import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM
def convert_tax_checkpoint_to_flax(tax_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    tax_model = checkpoints.load_t5x_checkpoint(tax_checkpoint_path)
    split_mlp_wi = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
_SCREAMING_SNAKE_CASE : List[str] = "SelfAttention"
if config.model_type == "longt5" and config.encoder_attention_type == "local":
_SCREAMING_SNAKE_CASE : Optional[Any] = "LocalSelfAttention"
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_SCREAMING_SNAKE_CASE : int = "TransientGlobalSelfAttention"
else:
raise ValueError(
"Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
" attribute with a value from ['local', 'transient-global]." )
# Encoder
for layer_index in range(config.num_layers ):
        layer_name = f"layers_{str(layer_index)}"
        # Self-Attention
        tax_attention_key = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        tax_attention_out = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        tax_attention_query = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        tax_attention_value = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            tax_global_layer_norm = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
        # Layer Normalization
        tax_attention_layer_norm = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
        if split_mlp_wi:
            tax_mlp_wi_0 = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            tax_mlp_wi_1 = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            tax_mlp_wi = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
        tax_mlp_wo = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
        # Layer Normalization
        tax_mlp_layer_norm = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
_SCREAMING_SNAKE_CASE : Tuple = tax_attention_key
_SCREAMING_SNAKE_CASE : List[Any] = tax_attention_out
_SCREAMING_SNAKE_CASE : Tuple = tax_attention_query
_SCREAMING_SNAKE_CASE : List[str] = tax_attention_value
_SCREAMING_SNAKE_CASE : Any = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_SCREAMING_SNAKE_CASE : str = tax_global_layer_norm
if split_mlp_wi:
            _SCREAMING_SNAKE_CASE : int = tax_mlp_wi_0
            _SCREAMING_SNAKE_CASE : Tuple = tax_mlp_wi_1
else:
_SCREAMING_SNAKE_CASE : List[str] = tax_mlp_wi
_SCREAMING_SNAKE_CASE : int = tax_mlp_wo
_SCREAMING_SNAKE_CASE : Optional[int] = tax_mlp_layer_norm
_SCREAMING_SNAKE_CASE : Tuple = flax_model_encoder_layer_block
# Only for layer 0:
    tax_encoder_rel_embedding = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
_SCREAMING_SNAKE_CASE : str = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        tax_encoder_global_rel_embedding = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
_SCREAMING_SNAKE_CASE : Tuple = tax_encoder_global_rel_embedding
# Assigning
    tax_encoder_norm = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
_SCREAMING_SNAKE_CASE : Union[str, Any] = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
        layer_name = f"layers_{str(layer_index)}"
# Self-Attention
        tax_attention_key = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        tax_attention_out = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        tax_attention_query = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        tax_attention_value = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
        # Layer Normalization
        tax_pre_attention_layer_norm = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]
        # Encoder-Decoder-Attention
        tax_enc_dec_attention_module = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        tax_enc_dec_attention_key = tax_enc_dec_attention_module["key"]["kernel"]
        tax_enc_dec_attention_out = tax_enc_dec_attention_module["out"]["kernel"]
        tax_enc_dec_attention_query = tax_enc_dec_attention_module["query"]["kernel"]
        tax_enc_dec_attention_value = tax_enc_dec_attention_module["value"]["kernel"]
        # Layer Normalization
        tax_cross_layer_norm = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
        # MLP
        if split_mlp_wi:
            tax_mlp_wi_0 = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            tax_mlp_wi_1 = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            tax_mlp_wi = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
        tax_mlp_wo = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
        # Layer Normalization
        tax_mlp_layer_norm = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
_SCREAMING_SNAKE_CASE : Any = tax_attention_key
_SCREAMING_SNAKE_CASE : Optional[int] = tax_attention_out
_SCREAMING_SNAKE_CASE : Optional[Any] = tax_attention_query
_SCREAMING_SNAKE_CASE : Union[str, Any] = tax_attention_value
_SCREAMING_SNAKE_CASE : Optional[Any] = tax_pre_attention_layer_norm
_SCREAMING_SNAKE_CASE : Optional[int] = tax_enc_dec_attention_key
_SCREAMING_SNAKE_CASE : Dict = tax_enc_dec_attention_out
_SCREAMING_SNAKE_CASE : Dict = tax_enc_dec_attention_query
_SCREAMING_SNAKE_CASE : Union[str, Any] = tax_enc_dec_attention_value
_SCREAMING_SNAKE_CASE : Union[str, Any] = tax_cross_layer_norm
if split_mlp_wi:
            _SCREAMING_SNAKE_CASE : int = tax_mlp_wi_0
            _SCREAMING_SNAKE_CASE : Tuple = tax_mlp_wi_1
else:
_SCREAMING_SNAKE_CASE : Tuple = tax_mlp_wi
_SCREAMING_SNAKE_CASE : List[str] = tax_mlp_wo
        _SCREAMING_SNAKE_CASE : Any = tax_mlp_layer_norm
_SCREAMING_SNAKE_CASE : List[str] = flax_model_decoder_layer_block
# Decoder Normalization
    tax_decoder_norm = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
    _SCREAMING_SNAKE_CASE : List[str] = tax_decoder_norm
# Only for layer 0:
    tax_decoder_rel_embedding = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
_SCREAMING_SNAKE_CASE : Tuple = tax_decoder_rel_embedding
# Token Embeddings
    tax_token_embeddings = tax_model["target"]["token_embedder"]["embedding"]
    _SCREAMING_SNAKE_CASE : Optional[int] = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
_SCREAMING_SNAKE_CASE : List[str] = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
    flax_model.save_pretrained(flax_dump_folder_path)
    print("T5X Model was successfully converted!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
    args = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path) | 249 | 1 |
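# Minimal self-contained sketch of the argparse behavior the converter's entry
# point above relies on: "--t5x_checkpoint_path" becomes the attribute
# args.t5x_checkpoint_path (the path below is a placeholder).
import argparse as _argparse
_parser = _argparse.ArgumentParser()
_parser.add_argument("--t5x_checkpoint_path", type=str)
_args = _parser.parse_args(["--t5x_checkpoint_path", "/tmp/t5x_ckpt"])
assert _args.t5x_checkpoint_path == "/tmp/t5x_ckpt"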
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        inp = tokenizer("This is me", return_tensors="pt")
        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        output = model.generate(**inp)
        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules()))
            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))
    def test_error_save_pretrained(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
| 294 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))
    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(
F'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '
F'16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file, eval_file=data_args.dev_file, test_file=data_args.test_file, tokenizer=tokenizer, label_column_id=data_args.label_column_id, max_seq_length=data_args.max_seq_length, )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=len(label2id), label2id=label2id, id2label={id: label for label, id in label2id.items()}, finetuning_task="text-classification", cache_dir=model_args.cache_dir, )
with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path, from_pt=bool(".bin" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
    trainer = TFTrainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(F' {key} = {value}' )
writer.write(F'{key} = {value}\n' )
        results.update(result)
return results
if __name__ == "__main__":
main()
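# Quick sketch of the label mapping get_tfds builds from the label column
# (label names below are illustrative, not taken from a real dataset):
_label_list = ["entailment", "neutral", "contradiction"]
_label2id = {label: i for i, label in enumerate(_label_list)}
assert _label2id["neutral"] == 1
assert {i: label for label, i in _label2id.items()}[2] == "contradiction"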
| 294 | 1 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)
    repo_id = "huggingface/label-files"
if "ade20k-full" in model_name:
# this should be ok
snake_case__ = 847
snake_case__ = '''maskformer-ade20k-full-id2label.json'''
elif "ade" in model_name:
# this should be ok
snake_case__ = 150
snake_case__ = '''ade20k-id2label.json'''
elif "coco-stuff" in model_name:
# this should be ok
snake_case__ = 171
snake_case__ = '''maskformer-coco-stuff-id2label.json'''
elif "coco" in model_name:
# TODO
snake_case__ = 133
snake_case__ = '''coco-panoptic-id2label.json'''
elif "cityscapes" in model_name:
# this should be ok
snake_case__ = 19
snake_case__ = '''cityscapes-id2label.json'''
elif "vistas" in model_name:
# this should be ok
snake_case__ = 65
snake_case__ = '''mapillary-vistas-id2label.json'''
snake_case__ = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
snake_case__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
snake_case__ = in_proj_weight[:dim, :]
snake_case__ = in_proj_bias[: dim]
snake_case__ = in_proj_weight[
dim : dim * 2, :
]
snake_case__ = in_proj_bias[
dim : dim * 2
]
snake_case__ = in_proj_weight[
-dim :, :
]
snake_case__ = in_proj_bias[-dim :]
# fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
# next, add query, keys and values (in that order) to the state dict
snake_case__ = in_proj_weight[: hidden_size, :]
snake_case__ = in_proj_bias[:config.hidden_size]
snake_case__ = in_proj_weight[hidden_size : hidden_size * 2, :]
snake_case__ = in_proj_bias[hidden_size : hidden_size * 2]
snake_case__ = in_proj_weight[-hidden_size :, :]
snake_case__ = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
# next, add query, keys and values (in that order) to the state dict
snake_case__ = in_proj_weight[: hidden_size, :]
snake_case__ = in_proj_bias[:config.hidden_size]
snake_case__ = in_proj_weight[hidden_size : hidden_size * 2, :]
snake_case__ = in_proj_bias[hidden_size : hidden_size * 2]
snake_case__ = in_proj_weight[-hidden_size :, :]
snake_case__ = in_proj_bias[-hidden_size :]
# fmt: on
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_maskformer_config(model_name)
    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]
    # for name, param in state_dict.items():
    #     print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()
    for name, param in model.named_parameters():
        print(name, param.shape)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)
    inputs = image_processor(image, return_tensors="pt")
    outputs = model(**inputs)
    print("Logits:", outputs.class_queries_logits[0, :3, :3])
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""maskformer-swin-tiny-ade""",
type=str,
help=("""Name of the MaskFormer model you\'d like to convert""",),
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
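# Standalone sketch of the fused-QKV split used by read_in_swin_q_k_v and
# read_in_decoder_q_k_v above (dim = 4 is an arbitrary stand-in for the head size):
import torch as _torch
_dim = 4
_in_proj_weight = _torch.randn(3 * _dim, _dim)  # query, key, value rows stacked
_q = _in_proj_weight[:_dim, :]
_k = _in_proj_weight[_dim : _dim * 2, :]
_v = _in_proj_weight[-_dim:, :]
assert _q.shape == _k.shape == _v.shape == (_dim, _dim)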
| 33 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowerCAmelCase = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
lowerCAmelCase = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
lowerCAmelCase = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )
    def _compute(
        self,
        predictions: List[List[List[str]]],
        references: List[List[str]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
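# Minimal sketch of the nltk call that _compute wraps (tokens invented; by the
# GLEU definition quoted above the result here is min(6/6, 6/10) = 0.6):
from nltk.translate import gleu_score as _gleu
_hyp = ["the", "cat", "sat"]
_ref = ["the", "cat", "sat", "down"]
print(_gleu.corpus_gleu(list_of_references=[[_ref]], hypotheses=[_hyp], min_len=1, max_len=4))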
| 43 | 0 |
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    first_str_length: int = len(first_str)
    second_str_length: int = len(second_str)
    abs_length: int = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list: list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
    print(alternative_string_arrange("AB", "XYZ"), end=" ")
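# Hedged spot checks for the interleaving above:
assert alternative_string_arrange("AB", "XYZ") == "AXBYZ"
assert alternative_string_arrange("abc", "") == "abc"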
| 440 |
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime."
        )
    # array bounds provided by analysis
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    plist = primes  # fallback for the probabilistic case beyond the last bound
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True
def test_miller_rabin() -> None:
assert not miller_rabin(5_61 )
assert miller_rabin(5_63 )
# 2047
assert not miller_rabin(83_82_01 )
assert miller_rabin(83_82_07 )
# 1_373_653
assert not miller_rabin(17_31_60_01 )
assert miller_rabin(17_31_60_17 )
# 25_326_001
assert not miller_rabin(30_78_38_66_41 )
assert miller_rabin(30_78_38_66_53 )
# 3_215_031_751
assert not miller_rabin(1_71_30_45_57_48_01 )
assert miller_rabin(1_71_30_45_57_48_19 )
# 2_152_302_898_747
assert not miller_rabin(2_77_97_99_72_83_07 )
assert miller_rabin(2_77_97_99_72_83_27 )
# 3_474_749_660_383
assert not miller_rabin(1_13_85_00_23_90_94_41 )
assert miller_rabin(1_13_85_00_23_90_95_27 )
# 341_550_071_728_321
assert not miller_rabin(1_27_50_41_01_88_48_80_43_51 )
assert miller_rabin(1_27_50_41_01_88_48_80_43_91 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_96_66_46_44_58_50_77_87_79_18_67 )
assert miller_rabin(7_96_66_46_44_58_50_77_87_79_19_51 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(55_28_40_67_74_46_64_78_97_66_03_33 )
assert miller_rabin(55_28_40_67_74_46_64_78_97_66_03_59 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
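# A few hedged spot checks, all well inside the deterministic range:
assert miller_rabin(97)
assert not miller_rabin(91)  # 91 = 7 * 13
assert [n for n in range(2, 20) if miller_rabin(n)] == [2, 3, 5, 7, 11, 13, 17, 19]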
| 440 | 1 |
'''simple docstring'''
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
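# Hedged spot checks for prime_factors above:
assert prime_factors(360) == [2, 2, 2, 3, 3, 5]
assert prime_factors(97) == [97]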
| 459 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)
        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)
        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--tf_checkpoint_path""",
default="""""",
type=str,
help="""An optional path to a TensorFlow checkpoint path to be converted.""",
)
parser.add_argument(
"""--transfo_xl_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--transfo_xl_dataset_file""",
default="""""",
type=str,
help="""An optional dataset file to be converted in a vocabulary.""",
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
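# Minimal sketch of the sys.modules aliasing used above, which lets pickles that
# reference a legacy module path resolve at load time (names here are stand-ins):
import pickle as _pickle, sys as _sys, types as _types
_legacy = _types.ModuleType("legacy_mod")
class _Thing:
    pass
_Thing.__module__ = "legacy_mod"  # pretend the class came from the legacy module
_Thing.__qualname__ = "Thing"
_legacy.Thing = _Thing
_sys.modules["legacy_mod"] = _legacy
assert isinstance(_pickle.loads(_pickle.dumps(_Thing())), _Thing)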
| 615 | 0 |
import argparse
import json
from tqdm import tqdm
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path", type=str, default="biencoder-nq-dev.json", help="Path to raw DPR training data"
    )
    parser.add_argument("--evaluation_set", type=str, help="where to store parsed evaluation_set file")
    parser.add_argument("--gold_data_path", type=str, help="where to store parsed gold_data_path file")
    args = parser.parse_args()
    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")
if __name__ == "__main__":
    main()
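# Hypothetical record in the biencoder-nq-dev.json layout main() consumes
# (field names come from the accesses above; the values are invented):
_record = {"question": "who wrote hamlet", "positive_ctxs": [{"title": "Hamlet"}, {"title": "William Shakespeare"}]}
_titles = [context["title"] for context in _record["positive_ctxs"]]
assert "\t".join(_titles) == "Hamlet\tWilliam Shakespeare"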
| 712 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    def __post_init__(self):
        self.task_name = self.task_name.lower()
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = Split.train , _UpperCAmelCase = None , ) -> Tuple:
warnings.warn(
'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , _UpperCAmelCase , )
UpperCamelCase_ = args
UpperCamelCase_ = glue_processors[args.task_name]()
UpperCamelCase_ = glue_output_modes[args.task_name]
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
try:
UpperCamelCase_ = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name' )
# Load data features from cache or dataset file
UpperCamelCase_ = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
UpperCamelCase_ = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"""Loading features from cached file {cached_features_file} [took %.3f s]""", time.time() - start )
            else:
                logger.info(f"""Creating features from dataset file at {args.data_dir}""")
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples, tokenizer, max_length=args.max_seq_length, label_list=label_list, output_mode=self.output_mode, )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
    def __len__(self) -> int:
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
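
# A minimal usage sketch (illustrative only): the checkpoint name and data
# directory below are hypothetical placeholders, not part of this module.
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     data_args = GlueDataTrainingArguments(task_name="mrpc", data_dir="/path/to/MRPC")
#     dataset = GlueDataset(data_args, tokenizer=tokenizer, mode=Split.train)
#     print(len(dataset), dataset.get_labels())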
| 618 | 0 |
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    """Scrape the IMDb Top 250 chart and map each movie title to its rating."""
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }
def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    """Write the scraped title/rating pairs to a CSV file."""
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])
if __name__ == "__main__":
write_movies()
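
# Hedged usage note: this scrapes the live IMDb chart, so results depend on the
# site's current HTML and the scraper may break if IMDb changes its markup.
#
#     movies = get_imdb_top_250_movies()
#     for title, rating in list(movies.items())[:3]:
#         print(title, rating)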
| 513 |
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
lowerCamelCase_ = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    """simple docstring"""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
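
# Illustrative only: constructing the shim emits the FutureWarning above while
# otherwise behaving exactly like DonutImageProcessor.
#
#     import warnings
#
#     with warnings.catch_warnings(record=True) as caught:
#         warnings.simplefilter("always")
#         DonutFeatureExtractor()
#     assert any(issubclass(w.category, FutureWarning) for w in caught)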
| 513 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f'Pixel and labels shape: {pixel_values.shape}, {labels.shape}')
        print(f'Labels: {labels}')
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': ViTMSNModel, 'image-classification': ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='ViTMSN does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image


@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained('facebook/vit-msn-small') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
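
# Hedged note: these tests are meant to be driven by pytest from the repository
# root; the file path below is an assumption about the repo layout.
#
#     python -m pytest tests/models/vit_msn/test_modeling_vit_msn.py -k "test_model"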
| 133 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'BlipImageProcessor'
    tokenizer_class = 'AutoTokenizer'

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError('You have to specify either images or text.')

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
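
# Hedged usage sketch; the checkpoint name and image path are placeholders and
# the checkpoint choice is an assumption, not dictated by this module.
#
#     from PIL import Image
#     from transformers import Blip2Processor
#
#     processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#     inputs = processor(images=Image.open("photo.jpg"), text="a photo of", return_tensors="pt")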
| 133 | 1 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    '''simple docstring'''

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'LayoutLMv2ImageProcessor'
    tokenizer_class = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                'You cannot provide bounding boxes '
                'if you initialized the image processor with apply_ocr set to True.'
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.'
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.')

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['words']

        encoded_inputs = self.tokenizer(text=text if text is not None else features['words'], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features['boxes'], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)

        # add pixel values
        images = features.pop('pixel_values')
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs['overflow_to_sample_mapping'])
        encoded_inputs['image'] = images

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
                f' {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}'
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',
            FutureWarning,
        )
        return self.image_processor
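
# Hedged usage sketch; the checkpoint and image path are hypothetical placeholders.
#
#     from PIL import Image
#     from transformers import LayoutXLMProcessor
#
#     processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#     encoding = processor(Image.open("form.png").convert("RGB"), return_tensors="pt")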
| 87 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
    """simple docstring"""

    def __init__(self, parent, batch_size=2, is_training=True, use_auxiliary_loss=False, num_queries=10, num_channels=3, min_size=32 * 8, max_size=32 * 8, num_labels=4, hidden_dim=64):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
def _UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_UpperCAmelCase )
UpperCamelCase_ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_UpperCAmelCase )
UpperCamelCase_ = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_UpperCAmelCase ) > 0.5
).float()
UpperCamelCase_ = (torch.rand((self.batch_size, self.num_labels) , device=_UpperCAmelCase ) > 0.5).long()
UpperCamelCase_ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
UpperCamelCase_ = self.num_queries
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = [1, 1, 1, 1]
UpperCamelCase_ = self.num_channels
UpperCamelCase_ = 64
UpperCamelCase_ = 128
UpperCamelCase_ = self.hidden_dim
UpperCamelCase_ = self.hidden_dim
UpperCamelCase_ = self.hidden_dim
return config
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.prepare_config_and_inputs()
UpperCamelCase_ = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
UpperCamelCase_ = output.encoder_hidden_states
UpperCamelCase_ = output.pixel_decoder_hidden_states
UpperCamelCase_ = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_UpperCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_UpperCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_UpperCAmelCase ) , config.decoder_layers )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ) -> Any:
with torch.no_grad():
UpperCamelCase_ = MaskaFormerModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCamelCase_ = model(pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase )
UpperCamelCase_ = model(_UpperCAmelCase , output_hidden_states=_UpperCAmelCase )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_UpperCAmelCase , _UpperCAmelCase )
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
UpperCamelCase_ = MaskaFormerForUniversalSegmentation(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
def comm_check_on_output(_UpperCAmelCase ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
UpperCamelCase_ = model(pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase )
UpperCamelCase_ = model(_UpperCAmelCase )
comm_check_on_output(_UpperCAmelCase )
UpperCamelCase_ = model(
pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase )
comm_check_on_output(_UpperCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskaFormerConfig, has_text_modality=False)
def _UpperCAmelCase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_UpperCAmelCase , **_UpperCAmelCase , output_hidden_states=_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_UpperCAmelCase )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Any:
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def _UpperCAmelCase ( self ) -> Any:
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def _UpperCAmelCase ( self ) -> int:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCAmelCase ( self ) -> str:
pass
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_UpperCAmelCase )
UpperCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ = [*signature.parameters.keys()]
UpperCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
@slow
def _UpperCAmelCase ( self ) -> Tuple:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
UpperCamelCase_ = MaskaFormerModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = (self.model_tester.min_size,) * 2
UpperCamelCase_ = {
'pixel_values': torch.randn((2, 3, *size) , device=_UpperCAmelCase ),
'mask_labels': torch.randn((2, 10, *size) , device=_UpperCAmelCase ),
'class_labels': torch.zeros(2 , 10 , device=_UpperCAmelCase ).long(),
}
UpperCamelCase_ = self.model_tester.get_config()
UpperCamelCase_ = MaskaFormerForUniversalSegmentation(_UpperCAmelCase ).to(_UpperCAmelCase )
UpperCamelCase_ = model(**_UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_UpperCAmelCase , **_UpperCAmelCase , output_hidden_states=_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_UpperCAmelCase ).to(_UpperCAmelCase )
UpperCamelCase_ = model(**_UpperCAmelCase , output_attentions=_UpperCAmelCase )
self.assertTrue(outputs.attentions is not None )
def _UpperCAmelCase ( self ) -> List[Any]:
if not self.model_tester.is_training:
return
UpperCamelCase_ = self.all_model_classes[1]
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
UpperCamelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
UpperCamelCase_ = model(_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase ).loss
loss.backward()
def _UpperCAmelCase ( self ) -> int:
UpperCamelCase_ = self.all_model_classes[1]
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
UpperCamelCase_ = True
UpperCamelCase_ = True
UpperCamelCase_ = model_class(_UpperCAmelCase ).to(_UpperCAmelCase )
model.train()
UpperCamelCase_ = model(_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase )
UpperCamelCase_ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCamelCase_ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
UpperCamelCase_ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCamelCase_ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_UpperCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_vision
@slow
class _a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _UpperCAmelCase ( self ) -> Optional[int]:
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def _UpperCAmelCase ( self ) -> List[str]:
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_UpperCAmelCase )
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = prepare_img()
UpperCamelCase_ = image_processor(_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
UpperCamelCase_ = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCAmelCase , (1, 3, 384, 384) )
with torch.no_grad():
UpperCamelCase_ = model(**_UpperCAmelCase )
UpperCamelCase_ = torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(_UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
UpperCamelCase_ = torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(_UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
UpperCamelCase_ = torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(_UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_UpperCAmelCase ).eval()
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = prepare_img()
UpperCamelCase_ = image_processor(_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
UpperCamelCase_ = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCAmelCase , (1, 3, 384, 384) )
with torch.no_grad():
UpperCamelCase_ = model(**_UpperCAmelCase )
# masks_queries_logits
UpperCamelCase_ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
UpperCamelCase_ = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
UpperCamelCase_ = torch.tensor(_UpperCAmelCase ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
# class_queries_logits
UpperCamelCase_ = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
UpperCamelCase_ = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_UpperCAmelCase ).eval()
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , )
UpperCamelCase_ = inputs['pixel_values'].to(_UpperCAmelCase )
UpperCamelCase_ = [el.to(_UpperCAmelCase ) for el in inputs['mask_labels']]
UpperCamelCase_ = [el.to(_UpperCAmelCase ) for el in inputs['class_labels']]
with torch.no_grad():
UpperCamelCase_ = model(**_UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
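
# Hedged note: the integration tests above download
# 'facebook/mask2former-swin-small-coco-instance' on first run, so offline
# environments should expect them to fail at download time or be skipped.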
| 23 | 0 |
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = 'main'
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = 'aaaaaaa'
# This commit does not exist, so we should 404.

PINNED_SHA1 = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684'
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'
# Sha-256 of pytorch_model.bin on the top of `main`, for checking purposes
@contextlib.contextmanager
def context_en():
    print('Welcome!')
    yield
    print('Bye!')


@contextlib.contextmanager
def context_fr():
    print('Bonjour!')
    yield
    print('Au revoir!')
class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec('transformers') is not None
class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print('Transformers are awesome!')
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), 'Transformers are awesome!\n')

    @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print('Transformers are awesome!')
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), 'Welcome!\nTransformers are awesome!\nBye!\n')

    @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print('Transformers are awesome!')
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), 'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n')
    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ['labels'])
        self.assertEqual(find_labels(BertForPreTraining), ['labels', 'next_sentence_label'])
        self.assertEqual(find_labels(BertForQuestionAnswering), ['start_positions', 'end_positions'])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ['labels'])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ['labels'])
        self.assertEqual(find_labels(TFBertForPreTraining), ['labels', 'next_sentence_label'])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ['start_positions', 'end_positions'])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ['labels'])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
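
# Illustrative only: find_labels inspects a model class's forward/call
# signature, so it can be queried without instantiating the model.
#
#     from transformers import BertForSequenceClassification
#     from transformers.utils import find_labels
#
#     print(find_labels(BertForSequenceClassification))  # ['labels']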
| 704 | """simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    """simple docstring"""

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = 'dict'
    pa_type: ClassVar[Any] = None
    _type: str = field(default='Translation', init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union['FeatureType', Dict[str, 'FeatureType']]:
        from .features import Value

        return {k: Value('string') for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
    """simple docstring"""

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = 'dict'
    pa_type: ClassVar[Any] = None
    _type: str = field(default='TranslationVariableLanguages', init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({'language': pa.list_(pa.string()), 'translation': pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union['FeatureType', Dict[str, 'FeatureType']]:
        from .features import Sequence, Value

        return {
            'language': Sequence(Value('string')),
            'translation': Sequence(Value('string')),
        }
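
# Minimal usage sketch (illustrative only): encode a variable-language example.
#
#     feature = TranslationVariableLanguages(languages=["de", "en", "fr"])
#     encoded = feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
#     # encoded == {"language": ("en", "fr", "fr"),
#     #             "translation": ("the cat", "la chatte", "le chat")}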
| 558 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    """simple docstring"""
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """simple docstring"""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """simple docstring"""
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaPhonemeCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
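
# Hedged CLI sketch; the checkpoint and output paths below are hypothetical
# placeholders, not values required by this script.
#
#     python convert_unispeech_original_pytorch_checkpoint_to_pytorch.py \
#         --checkpoint_path /path/to/unispeech.pt \
#         --dict_path /path/to/dict.ltr.txt \
#         --pytorch_dump_folder_path ./unispeech-converted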
| 419 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"xlm-roberta-base": 5_12,
"xlm-roberta-large": 5_12,
"xlm-roberta-large-finetuned-conll02-dutch": 5_12,
"xlm-roberta-large-finetuned-conll02-spanish": 5_12,
"xlm-roberta-large-finetuned-conll03-english": 5_12,
"xlm-roberta-large-finetuned-conll03-german": 5_12,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs=None, **kwargs) -> None:
        """simple docstring"""
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        """simple docstring"""
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
def A ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(UpperCamelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
a_ : List[Any] = os.path.join(
UpperCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase_ , """wb""" ) as fi:
a_ : str = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_ )
return (out_vocab_file,)
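
# --- Hedged usage sketch (not part of the tokenizer class above) ---
# Standalone illustration of the fairseq/spm offset logic implemented in
# _convert_token_to_id: spm ids shift up by `fairseq_offset`, the first four ids
# are pinned to the special tokens, and spm id 0 (an unknown piece) maps to <unk>.
if __name__ == "__main__":
    fairseq_offset = 1
    unk_token_id = 3  # fairseq id of "<unk>"

    def spm_id_to_fairseq_id(spm_id):
        # spm returns 0 for unknown pieces, which must map to <unk>
        return spm_id + fairseq_offset if spm_id else unk_token_id

    assert spm_id_to_fairseq_id(3) == 4  # "," sits at spm position 3, fairseq position 4
    assert spm_id_to_fairseq_id(0) == 3  # unknown spm piece -> <unk>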
| 419 | 1 |
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
    def get_masked_lm_array(name: str):
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, original_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(original_shape)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape)
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape)
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape)
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape)
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape)
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape)

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape)
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape)
        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output
        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")
        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform
    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")
    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")
    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("Model conversion was done successfully!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
    convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
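
# Hypothetical invocation sketch (the script filename and all paths below are
# placeholders, not taken from this file):
#
#   python convert_token_dropping_checkpoint.py \
#       --tf_checkpoint_path /path/to/tf2_checkpoint \
#       --bert_config_file /path/to/bert_config.json \
#       --pytorch_dump_path /path/to/output_dir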
| 682 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FlaubertConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, bos_token_id=self.bos_token_id, )
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]], dtype=tf.int32, )  # "J'aime flaubert !"

        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ], dtype=tf.float32, )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
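
# --- Hedged standalone sketch (not part of the tests above) ---
# Shows the expand-and-tile trick used in create_and_check_flaubert_for_multiple_choice:
# a (batch, seq) tensor is duplicated along a new num_choices axis.
if __name__ == "__main__":
    import tensorflow as tf  # local import so the sketch is self-contained

    batch, seq, num_choices = 2, 5, 4
    ids = tf.zeros((batch, seq), dtype=tf.int32)
    tiled = tf.tile(tf.expand_dims(ids, 1), (1, num_choices, 1))
    assert tiled.shape == (batch, num_choices, seq)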
| 682 | 1 |
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)

models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}

ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]


def parameterized_custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)

    def do_checks(self, output_dir):
        # run_asr doesn't save any results, so all we check for now is that the process didn't fail
        pass

    def run_and_check(self, stage, model, eval_steps=10, distributed=True, fp16=True, quality_checks=True, ):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage, model_name=model_name, eval_steps=eval_steps, num_train_epochs=1, distributed=distributed, fp16=fp16, )
        self.do_checks(output_dir)
        return output_dir

    def run_trainer(self, stage, model_name, eval_steps=10, num_train_epochs=1, distributed=True, fp16=True, ):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        """.split()
        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    def get_launcher(self, distributed=False):
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
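
# For illustration only: the command assembled by run_trainer above roughly expands
# to the following (model name, GPU count and directories are examples, not values
# fixed by this file):
#
#   deepspeed --num_nodes 1 --num_gpus 2 \
#       <examples_dir>/research_projects/wav2vec2/run_asr.py \
#       --model_name_or_path patrickvonplaten/wav2vec2_tiny_random \
#       --output_dir ./xxx ... --fp16 \
#       --deepspeed <test_dir>/ds_config_wav2vec2_zero2.json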
| 112 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, eos_token="</s>", unk_token="<unk>", additional_special_tokens=[], sp_model_kwargs=None, **kwargs, ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token, unk_token=unk_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
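
# Hedged usage sketch (requires network access to download the checkpoint; the
# model id comes from PRETRAINED_VOCAB_FILES_MAP above):
#
#   from transformers import ReformerTokenizer
#   tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#   tokens = tokenizer.tokenize("Hello world")
#   text = tokenizer.convert_tokens_to_string(tokens)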
| 112 | 1 |
"""simple docstring"""
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo, mode="rb", protocol=target_protocol, compression=self.compression, client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            }, **(target_options or {}), )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs, ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()
class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"
    def __init__(self, fo, mode="rb", target_protocol=None, target_options=None, block_size=DEFAULT_BLOCK_SIZE, **kwargs, ):
        super().__init__(
            fo=fo, mode=mode, target_protocol=target_protocol, target_options=target_options, block_size=block_size, **kwargs, )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
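
# Hedged usage sketch: once these filesystem classes are registered with fsspec
# (as the datasets library does), a compressed file can be read through a chained
# URL of the form shown in the `protocol` comment above. The remote URL here is a
# placeholder, not a real file.
#
#   import fsspec
#   with fsspec.open("gzip://file.txt::http://foo.bar/file.txt.gz") as f:
#       data = f.read()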
| 21 |
"""simple docstring"""
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config):
        super().__init__(config)
        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)
        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)
        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]
        return images, has_nsfw_concepts
    @torch.no_grad()
    def forward_onnx(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
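
# --- Hedged standalone sketch (not part of the module above) ---
# Reproduces the thresholding rule used in forward_onnx with random stand-in
# tensors: a per-concept score is the cosine distance minus a learned threshold,
# and an image is flagged when any score is positive.
if __name__ == "__main__":
    cos_dist = torch.rand(2, 3)  # (batch, num_concepts)
    concept_embeds_weights = torch.ones(3) * 0.5  # per-concept thresholds (stand-in values)
    adjustment = 0.0
    concept_scores = cos_dist - concept_embeds_weights + adjustment
    has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)
    print(has_nsfw_concepts)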
| 21 | 1 |