code | code_codestyle | style_context | style_context_codestyle | label
---|---|---|---|---
stringlengths 86 to 54.5k | int64 0 to 371 | stringlengths 87 to 49.2k | int64 0 to 349 | int64 0 to 1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
    "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json",
    # See all FNet models at https://huggingface.co/models?filter=fnet
}


class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
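# --- Usage sketch (added for illustration; not part of the original file) ---
# Within the transformers package, the config above can be built with keyword
# overrides; the expected values follow directly from the signature above.
if __name__ == "__main__":
    config = FNetConfig(num_hidden_layers=6)
    print(config.model_type)         # fnet
    print(config.num_hidden_layers)  # 6
    print(config.hidden_size)        # 768 (default)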
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
import unittest

import numpy as np

from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.roformer.modeling_flax_roformer import (
        FlaxRoFormerForMaskedLM,
        FlaxRoFormerForMultipleChoice,
        FlaxRoFormerForQuestionAnswering,
        FlaxRoFormerForSequenceClassification,
        FlaxRoFormerForTokenClassification,
        FlaxRoFormerModel,
    )


class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
from typing import List, Optional, Union

import numpy as np
import tensorflow as tf

from .utils import logging


logger = logging.get_logger(__name__)


def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)


def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs


def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF

    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)


def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )


def save_attributes_to_hdf5_group(group, name, data):
    HDF5_OBJECT_HEADER_LIMIT = 64512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data


def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data


def expand_1d(data):
    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
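# --- Usage sketch (added for illustration; not part of the original module) ---
# shape_list returns plain Python ints when the static shape is fully known and
# takes the NumPy path for arrays; flatten mirrors torch.flatten semantics.
if __name__ == "__main__":
    y = tf.zeros((2, 3, 4))
    print(shape_list(y))                  # [2, 3, 4]
    print(shape_list(np.ones((5, 2))))    # [5, 2]
    print(flatten(y, start_dim=1).shape)  # (2, 12)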
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
"self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
"self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
"self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
"self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
"self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
"self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
"self_attn.rotary_emb": "encoder.embed_positions",
"self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
"conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
"conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
"conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
"conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
"conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
"ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
"ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
"ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
"ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
"ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
"ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
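# Example invocation (added for illustration; all paths are placeholders and the
# script filename is assumed, not taken from the original):
#
#   python convert_wav2vec2_conformer_checkpoint.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --pytorch_dump_folder_path /path/to/output_dir \
#       --dict_path /path/to/dict.ltr.txt
#
# Pass --not_finetuned when converting a pretraining-only checkpoint (no CTC head).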
def solution(limit: int = 1000000) -> int:
    """Sum Euler's totient phi(n) over 2 <= n <= limit, using a prime sieve."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BeitForImageClassification",
        "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation",
        "BeitModel",
        "BeitPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification",
        "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel",
        "FlaxBeitPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_beit import BeitFeatureExtractor
        from .image_processing_beit import BeitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_beit import (
            BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BeitForImageClassification,
            BeitForMaskedImageModeling,
            BeitForSemanticSegmentation,
            BeitModel,
            BeitPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_beit import (
            FlaxBeitForImageClassification,
            FlaxBeitForMaskedImageModeling,
            FlaxBeitModel,
            FlaxBeitPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
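# Illustration (added; not part of the original script): duplicated "local"
# entries that share a single title are collapsed to one entry, then the result
# is sorted case-insensitively by title, e.g.
#
#   clean_model_doc_toc([
#       {"local": "model_doc/bert", "title": "BERT"},
#       {"local": "model_doc/albert", "title": "ALBERT"},
#       {"local": "model_doc/bert", "title": "BERT"},
#   ])
#   # -> [{"local": "model_doc/albert", "title": "ALBERT"},
#   #     {"local": "model_doc/bert", "title": "BERT"}]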
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate


@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features: Features) -> "AudioClassification":
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
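# --- Usage sketch (added for illustration; not part of the original file) ---
# Aligning the template with a dataset's features swaps the generic ClassLabel
# placeholder in label_schema for the dataset's own label feature.
if __name__ == "__main__":
    features = Features({"audio": Audio(), "labels": ClassLabel(names=["dog", "cat"])})
    template = AudioClassification().align_with_features(features)
    print(template.label_schema["labels"].names)  # ['dog', 'cat']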
from string import ascii_uppercase


dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Repeat the key until it matches the length of the message."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt the message with the expanded key (spaces pass through)."""
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher_text += dict2[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    """Decrypt the cipher text by reversing the shift applied above."""
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Check all 8 neighbours of cell (i, j) via depth-first search.
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if not visited[i][j] and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
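# --- Usage sketch (added for illustration; not part of the original file) ---
# With 8-directional connectivity, the three diagonal 1s below form one island.
if __name__ == "__main__":
    g = Graph(3, 3, [[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    print(g.count_islands())  # 1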
import tempfile

import numpy as np
import torch

from transformers import AutoTokenizer, T5EncoderModel

from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device

from ..test_pipelines_common import to_np


class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=1,
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=[1, 2],
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
            class_embed_type="timestep",
            mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu",
            time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)

    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
lowercase_ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = ['pixel_values']
def __init__( self , _a = True , _a = 1 / 255 , _a = True , _a = 8 , **_a , ):
super().__init__(**_a )
__a = do_rescale
__a = rescale_factor
__a = do_pad
__a = pad_size
def __UpperCAmelCase ( self , _a , _a , _a = None , **_a ):
return rescale(_a , scale=_a , data_format=_a , **_a )
def __UpperCAmelCase ( self , _a , _a , _a = None ):
__a , __a = get_image_size(_a )
__a = (old_height // size + 1) * size - old_height
__a = (old_width // size + 1) * size - old_width
return pad(_a , ((0, pad_height), (0, pad_width)) , mode='''symmetric''' , data_format=_a )
def __UpperCAmelCase ( self , _a , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ):
__a = do_rescale if do_rescale is not None else self.do_rescale
__a = rescale_factor if rescale_factor is not None else self.rescale_factor
__a = do_pad if do_pad is not None else self.do_pad
__a = pad_size if pad_size is not None else self.pad_size
__a = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
__a = [to_numpy_array(_a ) for image in images]
if do_rescale:
__a = [self.rescale(image=_a , scale=_a ) for image in images]
if do_pad:
__a = [self.pad(_a , size=_a ) for image in images]
__a = [to_channel_dimension_format(_a , _a ) for image in images]
__a = {'''pixel_values''': images}
return BatchFeature(data=_a , tensor_type=_a )
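# --- Usage sketch (added for illustration; not part of the original file) ---
# A 21x30 channels-first image should come back rescaled to [0, 1] and padded
# up to the next multiples of pad_size=8 (assumed behavior of the code above).
if __name__ == "__main__":
    image = np.random.randint(0, 256, (3, 21, 30), dtype=np.uint8)
    processor = Swin2SRImageProcessor()
    pixel_values = processor(image, return_tensors="np")["pixel_values"]
    print(pixel_values.shape)  # expected: (1, 3, 24, 32)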
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = 'vit_mae'
def __init__( self , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.0 , _a=0.0 , _a=0.02 , _a=1E-12 , _a=224 , _a=16 , _a=3 , _a=True , _a=16 , _a=512 , _a=8 , _a=2_048 , _a=0.75 , _a=False , **_a , ):
super().__init__(**_a )
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = initializer_range
__a = layer_norm_eps
__a = image_size
__a = patch_size
__a = num_channels
__a = qkv_bias
__a = decoder_num_attention_heads
__a = decoder_hidden_size
__a = decoder_num_hidden_layers
__a = decoder_intermediate_size
__a = mask_ratio
__a = norm_pix_loss
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                # The fused timm qkv matrix is split into separate query/key/value projections.
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swin_name",
        default="swin_tiny_patch4_window7_224",
        type=str,
        help="Name of the Swin timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
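# Example invocation (added for illustration; the script filename and output
# path are assumptions, not taken from the original):
#
#   python convert_swin_timm_to_pytorch.py \
#       --swin_name swin_tiny_patch4_window7_224 \
#       --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224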
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image Classification mapping
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
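# Auto classes: each one binds a lazy mapping above so that `from_config`/`from_pretrained`
# can dispatch from a config class to the matching Flax model class.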
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
| 296 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_flip_channel_order=True):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
"""simple docstring"""
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 139 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
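# Re-expose public objects on the deprecated modules so that the old import paths keep working.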
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 139 | 1 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, attention_window=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices, 1, inputs_dict["global_attention_mask"],
        )
        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def __A ( self : List[Any] ):
pass
def __A ( self : Optional[int] ):
# TODO: Head-masking not yet implement
pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 312 |
from __future__ import annotations
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
for i in range(1 ,len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 ,len(__UpperCamelCase ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1 ,len(__UpperCamelCase ) ):
for j in range(1 ,len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] ,matrix[i][j - 1] )
return matrix[-1][-1]
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 312 | 1 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase_ = logging.getLogger()
lowerCamelCase_ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
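# Route this module's log records to stdout so they are interleaved with the subprocess output in the test logs.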
class __A( TestCasePlus ):
"""simple docstring"""
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)
    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)
        testargs = f"\n            --data_dir {data_dir} \n            --output_dir {output_dir} \n            --model_name_or_path facebook/rag-sequence-base \n            --model_type rag_sequence \n            --do_train \n            --do_predict \n            --n_val -1 \n            --val_check_interval 1.0 \n            --train_batch_size 2 \n            --eval_batch_size 1 \n            --max_source_length 25 \n            --max_target_length 25 \n            --val_max_target_length 25 \n            --test_max_target_length 25 \n            --label_smoothing 0.1 \n            --dropout 0.1 \n            --attention_dropout 0.1 \n            --weight_decay 0.001 \n            --adam_epsilon 1e-08 \n            --max_grad_norm 0.1 \n            --lr_scheduler polynomial \n            --learning_rate 3e-04 \n            --num_train_epochs 1 \n            --warmup_steps 4 \n            --gradient_accumulation_steps 1 \n            --distributed-port 8787 \n            --use_dummy_dataset 1 \n            --distributed_retriever {distributed_retriever} \n        ".split()

        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result
    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
| 354 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
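# PyTorch names list-indexed submodules like "layers.0", while Flax flattens them to "layers_0";
# the helper below rewrites dotted numeric suffixes before state dict keys are split into tuples.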
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
| 178 | 0 |
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(self, parent, batch_size=2, seq_length=8, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=16, num_hidden_layers=5, num_attention_heads=2, intermediate_size=36, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 16 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo, pytorch_dump_folder_path):
    """Copy/paste/tweak the checkpoint's weights to our RobertaPreLayerNorm structure."""
    # convert configuration
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint-repo',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 110 | 0 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
lowerCamelCase_ = (
'''https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'''
)
@is_pipeline_test
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionnally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
@slow
@require_torch
@require_pytesseract
@require_vision
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
_A = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=__UpperCAmelCase )
_A = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=__UpperCAmelCase , revision="3dc6de3" , )
_A = INVOICE_URL
_A = "What is the invoice number?"
_A = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
] , )
_A = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
] , )
_A = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
[
{"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
]
]
* 2 , )
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
# This model should also work if `image` is set to None
_A = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
_A = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=__UpperCAmelCase )
_A = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=__UpperCAmelCase , revision="3dc6de3" , max_seq_len=50 , )
_A = INVOICE_URL
_A = "What is the invoice number?"
_A = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
] , )
_A = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
[
{"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
# This model should also work if `image` is set to None
_A = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=4 ) , [
{"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
] , )
@slow
@require_torch
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
_A = pipeline(
"document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , )
_A = INVOICE_URL
_A = "What is the invoice number?"
_A = dqa_pipeline(image=__UpperCAmelCase , question=__UpperCAmelCase , top_k=2 )
self.assertEqual(nested_simplify(__UpperCAmelCase , decimals=4 ) , [{"answer": "us-001"}] )
@require_tf
@unittest.skip("Document question answering not implemented in TF" )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
pass
| 354 |
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """
    Given any two of resistance, reactance and impedance (pass the unknown one
    as 0), solve Z**2 = R**2 + X**2 for the missing value.
    """
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 174 | 0 |
"""simple docstring"""
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Compute (base ** exponent) % modulo_value by exponentiation by squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value
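# _modexpt halves the exponent at each even step, so the recursion depth is
# O(log(exponent)) rather than O(exponent). Illustrative sanity check (values
# chosen for the example): _modexpt(3, 4, 5) == (3 ** 4) % 5 == 1.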
def solution(base: int = 1_777, height: int = 1_855, digits: int = 8) -> int:
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
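# The loop evaluates a tower of exponents ("tetration"): starting from `base`,
# it repeatedly raises `base` to the running result, keeping only the last
# `digits` digits via the 10**digits modulus. The default values match the
# hyperexponentiation setup of Project Euler problem 188.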
if __name__ == "__main__":
print(F"""{solution() = }""") | 269 |
"""simple docstring"""
import re
def dna_complement(dna: str) -> str:
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))
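# Each base is mapped to its Watson-Crick complement (A<->T, C<->G); for
# example, dna_complement("ATCG") returns "TAGC".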
if __name__ == "__main__":
import doctest
doctest.testmod() | 269 | 1 |
def sum_of_divisors(input_num: int) -> int:
    """Return the sum of the proper divisors of input_num."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0)
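# Note: this is the aliquot sum (proper divisors only, excluding the number
# itself). Illustrative example: for 6 the divisors 1, 2 and 3 sum to 6, which
# is exactly what makes 6 a perfect number.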
if __name__ == "__main__":
import doctest
doctest.testmod() | 329 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether cp is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode blocks.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True

    return False
def is_chinese(word: str):
    for char in word:
        char_code = ord(char)
        if not _is_chinese_char(char_code):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set()):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
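# Illustrative walkthrough (tokens chosen for the example): with
# bert_tokens = ["中", "国", "人"] and chinese_word_set = {"中国"}, the greedy
# longest-match loop rewrites the continuation piece to "##国" and returns
# ["中", "##国", "人"], so whole-word masking can treat "中国" as one word.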
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save positions of chinese subwords that start with ##, which
        # means they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids
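# Each entry of ref_ids lists, for one input line, the positions of "##"
# continuation pieces that are single Chinese characters; these positions are
# the "chinese refs" consumed by whole-word masking during pretraining.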
def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path'
)
parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer')
parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res')
    args = parser.parse_args()
main(args) | 329 | 1 |
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix
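# Illustrative example (values chosen for the example): for [[1, 2], [3, 4]],
# transpose gives [[1, 3], [2, 4]] and reverse_row then yields [[2, 4], [1, 3]],
# i.e. rotate_90 performs a 90-degree counterclockwise rotation.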
def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
| 118 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
    freeze_encoder: bool = field(default=False, metadata={'help': 'Whether to freeze the encoder.'} )
    freeze_embeds: bool = field(default=False, metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
    task: Optional[str] = field(
        default='summarization', metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'}, )
    max_source_length: Optional[int] = field(
        default=1_024, metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    max_target_length: Optional[int] = field(
        default=128, metadata={
            'help': (
                'The maximum total sequence length for target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    val_max_target_length: Optional[int] = field(
        default=142, metadata={
            'help': (
                'The maximum total sequence length for validation target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded. '
                'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
                'during ``evaluate`` and ``predict``.'
            )
        }, )
    test_max_target_length: Optional[int] = field(
        default=142, metadata={
            'help': (
                'The maximum total sequence length for test target text after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    n_train: Optional[int] = field(default=-1, metadata={'help': '# training examples. -1 means use all.'} )
    n_val: Optional[int] = field(default=-1, metadata={'help': '# validation examples. -1 means use all.'} )
    n_test: Optional[int] = field(default=-1, metadata={'help': '# test examples. -1 means use all.'} )
    src_lang: Optional[str] = field(default=None, metadata={'help': 'Source language id for translation.'} )
    tgt_lang: Optional[str] = field(default=None, metadata={'help': 'Target language id for translation.'} )
    eval_beams: Optional[int] = field(default=None, metadata={'help': '# num_beams to use for evaluation.'} )
    ignore_pad_token_for_loss: bool = field(
        default=True, metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'}, )
def handle_metrics(split, metrics, output_dir):
    """Log and save metrics for one split (train/val/test)."""
    logger.info(f'***** {split} metrics *****')
    for key in sorted(metrics.keys()):
        logger.info(f'  {key} = {metrics[key]}')
    save_json(metrics, os.path.join(output_dir, f'{split}_results.json'))
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
    logger.info('Training/evaluation parameters %s', training_args)
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )

    extra_model_params = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f'({config.__class__.__name__}) doesn\'t have a `{p}` attribute'
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path, from_tf='.ckpt' in model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir, )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())
    dataset_class = SeqaSeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer, type_path='train', data_dir=data_args.data_dir, n_obs=data_args.n_train, max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or '', )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer, type_path='val', data_dir=data_args.data_dir, n_obs=data_args.n_val, max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or '', )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer, type_path='test', data_dir=data_args.data_dir, n_obs=data_args.n_test, max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or '', )
        if training_args.do_predict
        else None
    )
# Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = SeqaSeqTrainer(
        model=model, args=training_args, data_args=data_args, train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=SeqaSeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores), compute_metrics=compute_metrics_fn, tokenizer=tokenizer, )

    all_metrics = {}
# Training
    if training_args.do_train:
        logger.info('*** Train ***')

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
        metrics = train_result.metrics
        metrics['train_n_objs'] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics('train', metrics, training_args.output_dir)
            all_metrics.update(metrics)
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***')

        metrics = trainer.evaluate(metric_key_prefix='val')
        metrics['val_n_objs'] = data_args.n_val
        metrics['val_loss'] = round(metrics['val_loss'], 4)

        if trainer.is_world_process_zero():
            handle_metrics('val', metrics, training_args.output_dir)
            all_metrics.update(metrics)
    if training_args.do_predict:
        logger.info('*** Predict ***')

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix='test')
        metrics = test_output.metrics
        metrics['test_n_objs'] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics['test_loss'] = round(metrics['test_loss'], 4)
            handle_metrics('test', metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True)
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, 'test_generations.txt'))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, 'all_results.json'))
return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 255 | 0 |
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
UpperCAmelCase__ = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/bigbird-roberta-base": 4096,
"google/bigbird-roberta-large": 4096,
"google/bigbird-base-trivia-itc": 4096,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(self, vocab_file, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, mask_token=mask_token, cls_token=cls_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def _decode(
        self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, spaces_between_special_tokens: bool = True, **kwargs, ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
| 290 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
"MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegaForCausalLM",
"MegaForMaskedLM",
"MegaForMultipleChoice",
"MegaForQuestionAnswering",
"MegaForSequenceClassification",
"MegaForTokenClassification",
"MegaModel",
"MegaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 290 | 1 |
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''',
datefmt='''%Y-%m-%d %H:%M:%S''',
level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {'facebook/bart-base': BartForConditionalGeneration}
tokenizer_dict = {'facebook/bart-base': BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.')
    parser.add_argument(
        '--validation_file', type=str, default=None, help='A csv or a json file containing the validation data.')
    parser.add_argument(
        '--max_length', type=int, default=5, help='The maximum total input sequence length after tokenization.', )
    parser.add_argument(
        '--num_beams', type=int, default=None, help=(
            'Number of beams to use for evaluation. This argument will be '
            'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'
        ), )
    parser.add_argument(
        '--model_name_or_path', type=str, help='Path to pretrained model or model identifier from huggingface.co/models.', required=True, )
    parser.add_argument(
        '--config_name', type=str, default=None, help='Pretrained config name or path if not the same as model_name', )
    parser.add_argument(
        '--device', type=str, default='cpu', help='Device where the model will be run', )
    parser.add_argument('--output_file_path', type=str, default=None, help='Where to store the final ONNX file.')

    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name, device='cpu'):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ['facebook/bart-base']:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = 'My friends are cool but they eat too many carbs.'
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1_024, return_tensors='pt').to(model.device)

        summary_ids = model.generate(
            inputs['input_ids'], attention_mask=inputs['attention_mask'], num_beams=num_beams, max_length=max_length, early_stopping=True, decoder_start_token_id=model.config.decoder_start_token_id, )

        torch.onnx.export(
            bart_script_model, (
                inputs['input_ids'],
                inputs['attention_mask'],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ), onnx_file_path, opset_version=14, input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'], output_names=['output_ids'], dynamic_axes={
                'input_ids': {0: 'batch', 1: 'seq'},
                'output_ids': {0: 'batch', 1: 'seq_out'},
            }, example_outputs=summary_ids, )

        logger.info('Model exported to {}'.format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info('Deduplicated and optimized model written to {}'.format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None, {
                'input_ids': inputs['input_ids'].cpu().numpy(),
                'attention_mask': inputs['attention_mask'].cpu().numpy(),
                'num_beams': np.array(num_beams),
                'max_length': np.array(max_length),
                'decoder_start_token_id': np.array(model.config.decoder_start_token_id),
            }, )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info('Model outputs from torch and ONNX Runtime are similar.')
        logger.info('Success.')
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined')

    model.to(device)

    if args.max_length:
        max_length = args.max_length

    if args.num_beams:
        num_beams = args.num_beams

    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = 'BART.onnx'

    logger.info('Exporting model to ONNX')
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
| 71 |
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
_DESCRIPTION = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
_KWARGS_DESCRIPTION = '''
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the candidate programs (Default: 4).
    timeout: maximum time in seconds each candidate program is allowed to run (Default: 3.0).
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
_WARNING = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
_LICENSE = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric):
"""simple docstring"""
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Value('string' ),
} ) , homepage='https://github.com/openai/human-eval' , codebase_urls=['https://github.com/openai/human-eval'] , reference_urls=['https://github.com/openai/human-eval'] , license=_LICENSE , )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the pass@k scores."""

        if os.getenv('HF_ALLOW_CODE_EVAL', 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError('This metric is currently not supported on Windows.')

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + '\n' + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result['completion_id'], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]['passed'] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f'pass@{k}': estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
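# The estimator above is the unbiased pass@k estimator from the cited Codex
# paper: given n sampled candidates of which c pass, pass@k equals
# 1 - C(n - c, k) / C(n, k), evaluated as a running product so the large
# binomial coefficients are never formed explicitly.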
| 71 | 1 |
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    res = a == b

    a.name = name_a
    b.name = name_b

    return res
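# The name fields are blanked before comparing because two initializers with
# identical data but different names should still count as duplicates; the
# original names are restored afterwards so the protos are left untouched.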
def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    """
    Removes duplicate initializers from an ONNX model and saves the
    deduplicated model next to the input as 'optimized_<name>'.
    """
    dir_name = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(dir_name, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(dir_name, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
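# The dtype codes above follow onnx.TensorProto.DataType: 1 = FLOAT and
# 6 = INT32 (4 bytes per element), 7 = INT64 and 11 = DOUBLE (8 bytes per
# element), which is how the per-initializer memory savings are estimated.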
| 288 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits, str):
        raise TypeError('number of qubits must be an integer.')
    if number_of_qubits <= 0:
        raise ValueError('number of qubits must be > 0.')
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError('number of qubits must be exact integer.')
    if number_of_qubits > 10:
        raise ValueError('number of qubits too large to simulate(>10).')

    qr = QuantumRegister(number_of_qubits, 'qr')
    cr = ClassicalRegister(number_of_qubits, 'cr')

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend('qasm_simulator')
    job = execute(quantum_circuit, backend, shots=10_000)

    return job.result().get_counts(quantum_circuit)
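# For the default all-zeros input state the QFT amplitudes all have equal
# magnitude (the controlled-phase gates are diagonal, so they change phases but
# not computational-basis probabilities), so the 10000 shots should be spread
# roughly uniformly over all 2**number_of_qubits outcomes.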
if __name__ == "__main__":
print(
F'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
| 288 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
"MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegaForCausalLM",
"MegaForMaskedLM",
"MegaForMultipleChoice",
"MegaForQuestionAnswering",
"MegaForSequenceClassification",
"MegaForTokenClassification",
"MegaModel",
"MegaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 46 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
'''simple docstring'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 116 | 0 |
import math
def jump_search(arr: list, x: int) -> int:
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1

    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
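# Jump search probes the sorted array in blocks of size ~sqrt(n) and then scans
# linearly inside a single block, giving O(sqrt(n)) comparisons overall.
# Illustrative example (values chosen for the example):
# jump_search([0, 1, 2, 3, 4, 5], 4) returns 4.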
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    arr = [int(item) for item in user_input.split(''',''')]
    x = int(input('''Enter the number to be searched:\n'''))
    res = jump_search(arr, x)
if res == -1:
print('''Number not found!''')
else:
print(F'''Number {x} is at index {res}''')
| 276 |
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 276 | 1 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="np",
        )
        text_inputs = text_inputs["input_ids"]

        inputs["prompt_embeds"] = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p,
                padding="max_length",
                max_length=pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="np",
            )
            text_inputs = text_inputs["input_ids"]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])
        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class A_ ( unittest.TestCase ):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
def lowercase ( self : Any ):
# using the PNDM scheduler by default
_UpperCAmelCase = OnnxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=snake_case_ , feature_extractor=snake_case_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=snake_case_ )
_UpperCAmelCase = "A painting of a squirrel eating a burger"
np.random.seed(0 )
_UpperCAmelCase = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=1_0 , output_type="np" )
_UpperCAmelCase = output.images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase = np.array([0.0_4_5_2, 0.0_3_9_0, 0.0_0_8_7, 0.0_3_5_0, 0.0_6_1_7, 0.0_3_6_4, 0.0_5_4_4, 0.0_5_2_3, 0.0_7_2_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase ( self : List[str] ):
_UpperCAmelCase = DDIMScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
_UpperCAmelCase = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=snake_case_ , safety_checker=snake_case_ , feature_extractor=snake_case_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=snake_case_ )
_UpperCAmelCase = "open neural network exchange"
_UpperCAmelCase = np.random.RandomState(0 )
_UpperCAmelCase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=1_0 , generator=snake_case_ , output_type="np" )
_UpperCAmelCase = output.images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase = np.array([0.2_8_6_7, 0.1_9_7_4, 0.1_4_8_1, 0.7_2_9_4, 0.7_2_5_1, 0.6_6_6_7, 0.4_1_9_4, 0.5_6_4_2, 0.6_4_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase ( self : List[str] ):
_UpperCAmelCase = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
_UpperCAmelCase = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=snake_case_ , safety_checker=snake_case_ , feature_extractor=snake_case_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=snake_case_ )
_UpperCAmelCase = "open neural network exchange"
_UpperCAmelCase = np.random.RandomState(0 )
_UpperCAmelCase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=1_0 , generator=snake_case_ , output_type="np" )
_UpperCAmelCase = output.images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_UpperCAmelCase = np.array([0.2_3_0_6, 0.1_9_5_9, 0.1_5_9_3, 0.6_5_4_9, 0.6_3_9_4, 0.5_4_0_8, 0.5_0_6_5, 0.6_0_1_0, 0.6_1_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = 0
def test_callback_fn(snake_case_ : int , snake_case_ : int , snake_case_ : np.ndarray ) -> None:
_UpperCAmelCase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 6_4, 6_4)
_UpperCAmelCase = latents[0, -3:, -3:, -1]
_UpperCAmelCase = np.array(
[-0.6_7_7_2, -0.3_8_3_5, -1.2_4_5_6, 0.1_9_0_5, -1.0_9_7_4, 0.6_9_6_7, -1.9_3_5_3, 0.0_1_7_8, 1.0_1_6_7] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 6_4, 6_4)
_UpperCAmelCase = latents[0, -3:, -3:, -1]
_UpperCAmelCase = np.array(
[-0.3_3_5_1, 0.2_2_4_1, -0.1_8_3_7, -0.2_3_2_5, -0.6_5_7_7, 0.3_3_9_3, -0.0_2_4_1, 0.5_8_9_9, 1.3_8_7_5] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
_UpperCAmelCase = False
_UpperCAmelCase = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , safety_checker=snake_case_ , feature_extractor=snake_case_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=snake_case_ )
_UpperCAmelCase = "Andromeda galaxy in a bottle"
_UpperCAmelCase = np.random.RandomState(0 )
pipe(
prompt=snake_case_ , num_inference_steps=5 , guidance_scale=7.5 , generator=snake_case_ , callback=snake_case_ , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def lowercase ( self : Dict ):
_UpperCAmelCase = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , safety_checker=snake_case_ , feature_extractor=snake_case_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(snake_case_ , snake_case_ )
assert pipe.safety_checker is None
_UpperCAmelCase = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(snake_case_ )
_UpperCAmelCase = OnnxStableDiffusionPipeline.from_pretrained(snake_case_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_UpperCAmelCase = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
| 22 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 22 | 1 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
lowerCAmelCase: str = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)
    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )
    @cached_property
    def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu
    @cached_property
    def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy
    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
| 96 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase: Any = logging.get_logger(__name__)
class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> "EncoderDecoderConfig":
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 96 | 1 |
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class PixaStructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11, reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""", )
@require_torch
@require_vision
class PixaStructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))
def A_ ( self : Optional[int] ) -> str:
# Initialize image_processor
lowerCamelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , Image.Image )
# Test not batched input
lowerCamelCase__ : List[Any] = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
lowerCamelCase__ : Optional[Any] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
lowerCamelCase__ : Optional[int] = image_processor(
UpperCAmelCase , return_tensors='pt' , max_patches=UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def A_ ( self : Tuple ) -> Dict:
# Initialize image_processor
lowerCamelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , Image.Image )
# Test not batched input
lowerCamelCase__ : int = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
lowerCamelCase__ : Dict = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(UpperCAmelCase ):
lowerCamelCase__ : Dict = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=UpperCAmelCase ).flattened_patches
lowerCamelCase__ : int = 'Hello'
lowerCamelCase__ : Union[str, Any] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=UpperCAmelCase , header_text=UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
lowerCamelCase__ : str = image_processor(
UpperCAmelCase , return_tensors='pt' , max_patches=UpperCAmelCase , header_text=UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def A_ ( self : Tuple ) -> List[str]:
# Initialize image_processor
lowerCamelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , numpify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , np.ndarray )
lowerCamelCase__ : Optional[int] = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
lowerCamelCase__ : Optional[Any] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
lowerCamelCase__ : Any = image_processor(
UpperCAmelCase , return_tensors='pt' , max_patches=UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def A_ ( self : Optional[int] ) -> Union[str, Any]:
# Initialize image_processor
lowerCamelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , torchify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , torch.Tensor )
# Test not batched input
lowerCamelCase__ : Optional[Any] = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
lowerCamelCase__ : str = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
lowerCamelCase__ : Any = image_processor(
UpperCAmelCase , return_tensors='pt' , max_patches=UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11, reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""", )
@require_torch
@require_vision
class PixaStructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))
def A_ ( self : Dict ) -> Tuple:
# Initialize image_processor
lowerCamelCase__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , Image.Image )
# Test not batched input
lowerCamelCase__ : Any = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
lowerCamelCase__ : List[str] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
lowerCamelCase__ : Any = image_processor(
UpperCAmelCase , return_tensors='pt' , max_patches=UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 50 |
def heaps(arr: list) -> list:
    """Return all permutations of arr, generated with Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res
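# Illustrative check (editorial, not in the original): Heap's algorithm emits
# n! orderings, so len(heaps([1, 2, 3])) == 6 and len(heaps(list(range(4)))) == 24.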
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
| 15 | 0 |
'''simple docstring'''
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Approximate a root of `function` with the secant method, starting from x0 and x1."""
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5
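# Editorial note: for f(x) = x**3 - 2x - 5 the secant iteration started at
# (3, 3.5) converges to the real root near 2.0945515.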
if __name__ == "__main__":
print(intersection(f, 3, 3.5)) | 364 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 8 | 0 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config
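    # Editorial note: PNDM combines Runge-Kutta warm-up steps (step_prk) with
    # linear multistep steps (step_plms); the checks below exercise both, and
    # scheduler.ets caches the past model outputs ("residuals") the multistep
    # formula needs.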
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)
    def test_pow_of_3_inference_steps(self):
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
| 82 |
def decimal_to_binary(num: int) -> str:
    """Convert an integer to its binary string, prefixed with 0b (or -0b)."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if num == 0:
        return "0b0"

    negative = False
    if num < 0:
        negative = True
        num = -num

    binary = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 338 | 0 |
'''simple docstring'''
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Copy/paste/tweak the fairseq model's weights into the transformers design."""
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 352 |
'''simple docstring'''
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """Choose a random pivot element from the list."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the k-th smallest element of lst (1-indexed), quickselect-style."""
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
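# Editorial caveat: the two comprehensions above drop extra copies of the
# pivot, so kth_number assumes distinct elements.
# Example: kth_number([3, 1, 2], 2) == 2.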
if __name__ == "__main__":
import doctest
doctest.testmod()
| 242 | 0 |
'''simple docstring'''
import random
def partition(a: list, left_index: int, right_index: int) -> int:
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point
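# Editorial note: randomizing the pivot gives expected O(n log n) running time
# on any input ordering; the worst case is still O(n**2), but no fixed input
# can trigger it deterministically.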
def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
if __name__ == "__main__":
main()
| 254 |
'''simple docstring'''
def partition(m: int) -> int:
    """Count the number of integer partitions of m via dynamic programming."""
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]
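
# Illustrative check (added for exposition; not in the original script):
# there are 7 partitions of 5, namely 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1
# and 1+1+1+1+1.
def _demo_partition() -> None:
    assert partition(5) == 7
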
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
_UpperCamelCase = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
_UpperCamelCase = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
| 254 | 1 |
"""simple docstring"""
import cmath
import math
def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    """
    Calculate the apparent power in a single-phase AC circuit from the RMS
    voltage and current and their phase angles in degrees.
    """
    # Convert the angles from degrees to radians
    voltage_angle_rad = math.radians(voltage_angle)
    current_angle_rad = math.radians(current_angle)

    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle_rad)
    current_rect = cmath.rect(current, current_angle_rad)

    # Calculate apparent power
    return voltage_rect * current_rect
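
# Illustrative check (added for exposition; not in the original file):
# 100 V at 0 degrees driving 5 A at 0 degrees gives a purely real 500 VA.
def _demo_apparent_power() -> None:
    assert cmath.isclose(apparent_power(100, 5, 0, 0), 500 + 0j)
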
if __name__ == "__main__":
import doctest
doctest.testmod() | 64 | """simple docstring"""
def get_highest_set_bit_position(number: int) -> int:
    """
    Return the 1-indexed position of the highest set bit of ``number``
    (i.e. its bit length); 0 is returned for an input of 0.
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
    while number:
        position += 1
        number >>= 1

    return position
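
# Illustrative check (added for exposition; not in the original file):
# 25 == 0b11001, so its highest set bit is at position 5, counting from 1
# at the least significant bit; 0 has no set bits.
def _demo_highest_set_bit() -> None:
    assert get_highest_set_bit_position(25) == 5
    assert get_highest_set_bit_position(1) == 1
    assert get_highest_set_bit_position(0) == 0
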
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 64 | 1 |
'''simple docstring'''
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f'You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.' )

    @classmethod
    def pip_install(cls):
        return f'`pip install {cls.pip_package or cls.name}`'


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f'{len(available_backends)} hyperparameter search backends available. Using {name} as the default.' )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f' - To install {backend.name} run {backend.pip_install()}'
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
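
# Usage sketch (illustrative; not part of the original module). A caller would
# pick the first installed backend roughly like this:
#
#     backend_name = default_hp_search_backend()  # e.g. "optuna" if installed
#     backend_cls = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(backend_name)]
#     backend_cls().ensure_available()  # raises with install hints otherwise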
| 22 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000, clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)

        output = pipe(prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type="np")
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)

        output = pipe(prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type="np")
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
| 56 | 0 |
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
'\\nname: ""\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: "Dataset Card for X" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: "Table of Contents"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Dataset Description"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: "Dataset Summary"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Supported Tasks and Leaderboards"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n'
)
CORRECT_DICT = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
README_CORRECT = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
README_CORRECT_FOUR_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
CORRECT_DICT_FOUR_LEVEL = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Extra Ignored Subsection',
'text': '',
'is_empty_text': True,
'subsections': [],
}
],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
README_EMPTY_YAML = '\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_EMPTY_YAML = (
    'The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'
)
README_NO_YAML = '\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_NO_YAML = (
    'The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'
)
README_INCORRECT_YAML = '\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_INCORRECT_YAML = 'The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'
README_MISSING_TEXT = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MISSING_TEXT = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'
README_NONE_SUBSECTION = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n'
EXPECTED_ERROR_README_NONE_SUBSECTION = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'
README_MISSING_SUBSECTION = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MISSING_SUBSECTION = 'The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'
README_MISSING_CONTENT = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n'
EXPECTED_ERROR_README_MISSING_CONTENT = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'
README_MISSING_FIRST_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'
README_MULTIPLE_WRONG_FIRST_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n'
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = 'The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'
README_WRONG_FIRST_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = 'The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'
README_EMPTY = ''
EXPECTED_ERROR_README_EMPTY = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'
README_MULTIPLE_SAME_HEADING_1 = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = 'The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'
@pytest.mark.parametrize(
"readme_md, expected_dict" ,[
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] ,)
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
"readme_md, expected_error" ,[
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] ,)
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error" ,[
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] ,)
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
"readme_md," ,[
(README_MULTIPLE_SAME_HEADING_1),
] ,)
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
"readme_md, expected_dict" ,[
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] ,)
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
"readme_md, expected_error" ,[
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] ,)
def test_readme_from_readme_validation_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error" ,[
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] ,)
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
"readme_md," ,[
(README_MULTIPLE_SAME_HEADING_1),
] ,)
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
| 329 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether cp is the codepoint of a CJK character."""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True

    return False
def is_chinese(word: str):
    """Return 1 if every character of the word is a CJK character, else 0."""
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    """Collect the multi-character tokens that are entirely Chinese."""
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set()):
    """Prefix "##" to wordpieces that continue a known whole Chinese word."""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
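
# Illustrative check (added for exposition; not in the original script): given
# wordpieces ["你", "好", "世", "界"] and the whole word {"你好"}, the second
# piece is rewritten as "##好" so whole-word masking treats "你好" as one unit.
def _demo_add_sub_symbol() -> None:
    assert add_sub_symbol(["你", "好", "世", "界"], {"你好"}) == ["你", "##好", "世", "界"]
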
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    """For every line, collect positions of wordpieces that continue a whole Chinese word."""
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save the pos of chinese subwords that start with ##, which means they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids
def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
type=str,
default='./resources/chinese-demo.txt',
        help='file to process, same format as the LM training data',
)
parser.add_argument(
'--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path'
)
parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer')
parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res')
    args = parser.parse_args()
    main(args)
| 329 | 1 |
def gray_code(bit_count: int) -> list:
    """Return the bit_count-bit Gray code sequence as a list of integers."""
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """Return the bit_count-bit Gray code sequence as a list of bit strings."""
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n=1
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence
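
# Illustrative check (added for exposition; not in the original file): the
# 2-bit Gray code visits 00, 01, 11, 10, i.e. [0, 1, 3, 2] as integers, with
# exactly one bit changing between consecutive codes.
def _demo_gray_code() -> None:
    assert gray_code(2) == [0, 1, 3, 2]
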
if __name__ == "__main__":
import doctest
doctest.testmod()
| 48 |
import math
def sieve(n: int) -> list[int]:
    """Segmented sieve of Eratosthenes: return all primes up to ``n``."""
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each

            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)

    return prime
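
# Illustrative check (added for exposition; not in the original script): the
# primes up to 30 found by the segmented sieve.
assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
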
print(sieve(10**6))
| 280 | 0 |
'''simple docstring'''
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple('covid_data', 'cases deaths recovered')


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    """Scrape the worldometers landing page for the three headline counters."""
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
| 354 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
ADAPTER_CONFIG_NAME = "adapter_config.json"
ADAPTER_WEIGHTS_NAME = "adapter_model.bin"
ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"

SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility

MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
| 142 | 0 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size)

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    # ResNet is a vision model without text inputs or attention, so most of the
    # common text-model machinery does not apply (flag names assumed from the
    # shared TFModelTesterMixin conventions).
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4])

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    # Loads the fixture image used by the integration test below.
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])

        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 1 |
import os
def solution():
    """
    Sum of the name scores for all names in p022_names.txt, where a name's
    score is its letter total (A=1, B=2, ...) multiplied by its 1-based
    position in the alphabetically sorted list (Project Euler problem 22).
    """
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
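
# Worked example (added for exposition; from the Project Euler 22 statement):
# the name "COLIN" scores 3 + 15 + 12 + 9 + 14 = 53, and as the 938th name
# alphabetically it contributes 938 * 53 = 49714 to the total.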
if __name__ == "__main__":
    print(solution())
| 76 | 0 |
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """
    Evaluate how similar the item is to the target by counting each char
    in the right position.
    """
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
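
# Illustrative check (added for exposition; not in the original script):
# crossover always yields two children whose lengths match the parents'.
def _demo_crossover() -> None:
    child_1, child_2 = crossover("abcdef", "uvwxyz")
    assert len(child_1) == len(child_2) == 6
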
def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)
def select(parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str]) -> list[str]:
    """Select the second parent and generate new population."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]

        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Evolve random populations until one individual matches the target string."""
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}")

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of the evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
| 114 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 114 | 1 |
import random
def partition(a: list, left_index: int, right_index: int) -> int:
    """Lomuto partition around a[left_index]; returns the pivot's final index."""
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[i - 1], a[left_index] = a[left_index], a[i - 1]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    """Sort a[left:right] in place using quicksort with a random pivot."""
    if left < right:
        pivot = random.randint(left, right - 1)
        a[left], a[pivot] = (
            a[pivot],
            a[left],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
if __name__ == "__main__":
main()
| 129 |
import gc
import math
import unittest
import torch
from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
logger = logging.get_logger(__name__)
enable_full_determinism()
class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class lowerCamelCase__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase):
'''simple docstring'''
snake_case_ =UNetaDModel
snake_case_ ="""sample"""
@property
def lowerCAmelCase__ (self ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : str = 4
lowerCAmelCase__ : Optional[int] = 4
lowerCAmelCase__ : Optional[Any] = (32, 32)
lowerCAmelCase__ : Optional[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCamelCase )
lowerCAmelCase__ : Optional[int] = torch.tensor([10] ).to(__lowerCamelCase )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase__ (self ) -> List[Any]:
"""simple docstring"""
return (4, 32, 32)
@property
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
return (4, 32, 32)
def lowerCAmelCase__ (self ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : Tuple = {
'''sample_size''': 32,
'''in_channels''': 4,
'''out_channels''': 4,
'''layers_per_block''': 2,
'''block_out_channels''': (32, 64),
'''attention_head_dim''': 32,
'''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''),
'''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''),
}
lowerCAmelCase__ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase__ (self ) -> int:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' ,output_loading_info=__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertEqual(len(loading_info['''missing_keys'''] ) ,0 )
model.to(__lowerCamelCase )
lowerCAmelCase__ : Tuple = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' ,'''This test is supposed to run on GPU''' )
def lowerCAmelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : Dict = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' ,output_loading_info=__lowerCamelCase )
model.to(__lowerCamelCase )
lowerCAmelCase__ : Optional[Any] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' ,'''This test is supposed to run on GPU''' )
def lowerCAmelCase__ (self ) -> str:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' ,output_loading_info=__lowerCamelCase )
model_accelerate.to(__lowerCamelCase )
model_accelerate.eval()
lowerCAmelCase__ : Union[str, Any] = torch.randn(
1 ,model_accelerate.config.in_channels ,model_accelerate.config.sample_size ,model_accelerate.config.sample_size ,generator=torch.manual_seed(0 ) ,)
lowerCAmelCase__ : Dict = noise.to(__lowerCamelCase )
lowerCAmelCase__ : List[Any] = torch.tensor([10] * noise.shape[0] ).to(__lowerCamelCase )
lowerCAmelCase__ : Tuple = model_accelerate(__lowerCamelCase ,__lowerCamelCase )['''sample''']
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = UNetaDModel.from_pretrained(
'''fusing/unet-ldm-dummy-update''' ,output_loading_info=__lowerCamelCase ,low_cpu_mem_usage=__lowerCamelCase )
model_normal_load.to(__lowerCamelCase )
model_normal_load.eval()
lowerCAmelCase__ : List[Any] = model_normal_load(__lowerCamelCase ,__lowerCamelCase )['''sample''']
assert torch_all_close(__lowerCamelCase ,__lowerCamelCase ,rtol=1e-3 )
def lowerCAmelCase__ (self ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : List[str] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' )
model.eval()
model.to(__lowerCamelCase )
lowerCAmelCase__ : Union[str, Any] = torch.randn(
1 ,model.config.in_channels ,model.config.sample_size ,model.config.sample_size ,generator=torch.manual_seed(0 ) ,)
lowerCAmelCase__ : str = noise.to(__lowerCamelCase )
lowerCAmelCase__ : List[Any] = torch.tensor([10] * noise.shape[0] ).to(__lowerCamelCase )
with torch.no_grad():
lowerCAmelCase__ : str = model(__lowerCamelCase ,__lowerCamelCase ).sample
lowerCAmelCase__ : List[str] = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
lowerCAmelCase__ : str = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] )
# fmt: on
self.assertTrue(torch_all_close(__lowerCamelCase ,__lowerCamelCase ,rtol=1e-3 ) )
class lowerCamelCase__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase):
'''simple docstring'''
snake_case_ =UNetaDModel
snake_case_ ="""sample"""
@property
def lowerCAmelCase__ (self ,__lowerCamelCase=(32, 32) ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : str = 4
lowerCAmelCase__ : Optional[int] = 3
lowerCAmelCase__ : Tuple = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCamelCase )
lowerCAmelCase__ : Optional[int] = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa ,device=__lowerCamelCase )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase__ (self ) -> str:
"""simple docstring"""
return (3, 32, 32)
@property
def lowerCAmelCase__ (self ) -> Optional[int]:
"""simple docstring"""
return (3, 32, 32)
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : Tuple = {
'''block_out_channels''': [32, 64, 64, 64],
'''in_channels''': 3,
'''layers_per_block''': 1,
'''out_channels''': 3,
'''time_embedding_type''': '''fourier''',
'''norm_eps''': 1e-6,
'''mid_block_scale_factor''': math.sqrt(2.0 ),
'''norm_num_groups''': None,
'''down_block_types''': [
'''SkipDownBlock2D''',
'''AttnSkipDownBlock2D''',
'''SkipDownBlock2D''',
'''SkipDownBlock2D''',
],
'''up_block_types''': [
'''SkipUpBlock2D''',
'''SkipUpBlock2D''',
'''AttnSkipUpBlock2D''',
'''SkipUpBlock2D''',
],
}
lowerCAmelCase__ : Tuple = self.dummy_input
return init_dict, inputs_dict
@slow
def lowerCAmelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : Dict = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' ,output_loading_info=__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertEqual(len(loading_info['''missing_keys'''] ) ,0 )
model.to(__lowerCamelCase )
lowerCAmelCase__ : Optional[int] = self.dummy_input
lowerCAmelCase__ : Tuple = floats_tensor((4, 3) + (2_56, 2_56) ).to(__lowerCamelCase )
lowerCAmelCase__ : Union[str, Any] = noise
lowerCAmelCase__ : Union[str, Any] = model(**__lowerCamelCase )
assert image is not None, "Make sure output is not None"
@slow
def lowerCAmelCase__ (self ) -> str:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' )
model.to(__lowerCamelCase )
lowerCAmelCase__ : Dict = 4
lowerCAmelCase__ : Optional[Any] = 3
lowerCAmelCase__ : List[Any] = (2_56, 2_56)
lowerCAmelCase__ : str = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCamelCase )
lowerCAmelCase__ : Optional[int] = torch.tensor(batch_size * [1e-4] ).to(__lowerCamelCase )
with torch.no_grad():
lowerCAmelCase__ : List[Any] = model(__lowerCamelCase ,__lowerCamelCase ).sample
lowerCAmelCase__ : Any = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
lowerCAmelCase__ : Optional[Any] = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -1_0980.7129, -2_0028.8535, 8148.2822, 2342.2905, 567.7608] )
# fmt: on
self.assertTrue(torch_all_close(__lowerCamelCase ,__lowerCamelCase ,rtol=1e-2 ) )
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : int = UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' )
model.to(__lowerCamelCase )
lowerCAmelCase__ : Optional[int] = 4
lowerCAmelCase__ : Dict = 3
lowerCAmelCase__ : str = (32, 32)
lowerCAmelCase__ : Tuple = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCamelCase )
lowerCAmelCase__ : Tuple = torch.tensor(batch_size * [1e-4] ).to(__lowerCamelCase )
with torch.no_grad():
lowerCAmelCase__ : Optional[int] = model(__lowerCamelCase ,__lowerCamelCase ).sample
lowerCAmelCase__ : List[Any] = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
lowerCAmelCase__ : Union[str, Any] = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] )
# fmt: on
self.assertTrue(torch_all_close(__lowerCamelCase ,__lowerCamelCase ,rtol=1e-2 ) )
def lowerCAmelCase__ (self ) -> Dict:
"""simple docstring"""
pass
| 129 | 1 |
"""simple docstring"""
def _snake_case ( UpperCAmelCase_ : dict ):
A__ = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
A__ = set()
return any(
node not in visited and depth_first_search(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
for node in graph )
def _snake_case ( UpperCAmelCase_ : dict , UpperCAmelCase_ : int , UpperCAmelCase_ : set , UpperCAmelCase_ : set ):
visited.add(UpperCAmelCase_ )
rec_stk.add(UpperCAmelCase_ )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(UpperCAmelCase_ )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
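
    # Quick sanity checks added for illustration (hypothetical graphs, not part
    # of the original module):
    assert check_cycle({0: [1], 1: [2], 2: [0]}) is True  # back edge 2 -> 0
    assert check_cycle({0: [1], 1: [2], 2: []}) is False  # a DAG has no cycle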
| 359 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
SCREAMING_SNAKE_CASE_ : Tuple = logging.get_logger(__name__)
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: int , *UpperCamelCase: Optional[Any] , **UpperCamelCase: str ):
"""simple docstring"""
warnings.warn(
"""The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use OwlViTImageProcessor instead.""" , UpperCamelCase , )
super().__init__(*UpperCamelCase , **UpperCamelCase )
| 69 | 0 |
import unittest

import numpy as np

from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.roformer.modeling_flax_roformer import (
        FlaxRoFormerForMaskedLM,
        FlaxRoFormerForMultipleChoice,
        FlaxRoFormerForQuestionAnswering,
        FlaxRoFormerForSequenceClassification,
        FlaxRoFormerForTokenClassification,
        FlaxRoFormerModel,
    )


class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000

        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )

        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 270 |
import math
import unittest


def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
| 161 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate


@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
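
# Minimal usage sketch (added; `prepare_for_task` is the datasets API this
# template targets, and the dataset id below is hypothetical):
#
#   from datasets import load_dataset
#
#   ds = load_dataset("some-user/some-audio-dataset", split="train")
#   ds = ds.prepare_for_task(AudioClassification(audio_column="audio", label_column="labels"))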
| 369 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable

import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm

from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule


class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule


def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        num_classes = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(num_classes)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3


@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()


@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask


def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)


@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits,
            start_labels,
            end_logits,
            end_labels,
            pooled_logits,
            pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng


@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics


class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)


@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state

    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)

    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i

    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")


def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator


def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr


def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        mask = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
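
# Minimal sketch of wiring the optimizer together (added; the values mirror the
# `Args` defaults above and `num_train_steps` here is a made-up illustration):
#
#   args = Args()
#   tx, lr = build_tx(
#       lr=args.lr,
#       init_lr=args.init_lr,
#       warmup_steps=args.warmup_steps,
#       num_train_steps=100_000,
#       weight_decay=args.weight_decay,
#   )
#   # `tx` goes into TrainState.create(...); `lr` is the schedule logged during training.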
| 179 | 0 |
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}


class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
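
# Minimal usage sketch (added; relies only on the standard PretrainedConfig API):
#
#   config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
#   config.save_pretrained("./mobilenet_v1_0.75_192")  # hypothetical output directory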
| 302 |
import tempfile

import torch

from diffusers import IPNDMScheduler

from .test_schedulers import SchedulerCommonTest


class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
| 302 | 1 |
import unittest

from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_detectron2,
    require_pytesseract,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image

    from transformers.image_utils import load_image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None


# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)


@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING

    @require_pytesseract
    @require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples

    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )

    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionnally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])

    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )

    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])

    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
| 223 |
import unittest

from datasets import load_dataset

from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow


@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )
        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
| 223 | 1 |
import os


def solution(filename: str = "input.txt") -> int:
    """Returns the minimal left-to-right path sum through the matrix stored in
    `filename`, moving only up, down, and right (Project Euler problem 82)."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        # entering a column from the left
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        # relax downward moves within the column
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        # relax upward moves within the column
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
print(f"{solution() = }")
| 205 |
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Converts a snake_case string to camelCase, or to PascalCase if `use_pascal` is True."""
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
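
    # Illustrative calls added for this write-up:
    assert snake_to_camel_case("some_random_string") == "someRandomString"
    assert snake_to_camel_case("some_random_string", use_pascal=True) == "SomeRandomString"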
| 205 | 1 |
import os
from datetime import datetime as dt

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()
| 365 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase :
@staticmethod
def lowercase__ ( *__snake_case : Optional[Any] , **__snake_case : Any ) -> Tuple:
pass
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
_lowercase: Union[str, Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def lowercase__ ( self : List[str] , __snake_case : Optional[Any] , __snake_case : str , __snake_case : List[str] ) -> int:
_lowerCAmelCase = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" )
_lowerCAmelCase = [
{
"""image""": Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""question""": """How many cats are there?""",
},
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""question""": """How many cats are there?""",
},
]
return vqa_pipeline, examples
def lowercase__ ( self : Any , __snake_case : List[Any] , __snake_case : List[Any] ) -> Union[str, Any]:
_lowerCAmelCase = vqa_pipeline(__snake_case , top_k=1 )
self.assertEqual(
__snake_case , [
[{"""score""": ANY(__snake_case ), """answer""": ANY(__snake_case )}],
[{"""score""": ANY(__snake_case ), """answer""": ANY(__snake_case )}],
] , )
@require_torch
def lowercase__ ( self : str ) -> int:
_lowerCAmelCase = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" )
_lowerCAmelCase = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
_lowerCAmelCase = """How many cats are there?"""
_lowerCAmelCase = vqa_pipeline(image=__snake_case , question="""How many cats are there?""" , top_k=2 )
self.assertEqual(
__snake_case , [{"""score""": ANY(__snake_case ), """answer""": ANY(__snake_case )}, {"""score""": ANY(__snake_case ), """answer""": ANY(__snake_case )}] )
_lowerCAmelCase = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
__snake_case , [{"""score""": ANY(__snake_case ), """answer""": ANY(__snake_case )}, {"""score""": ANY(__snake_case ), """answer""": ANY(__snake_case )}] )
@slow
@require_torch
def lowercase__ ( self : List[Any] ) -> List[str]:
_lowerCAmelCase = pipeline("""visual-question-answering""" , model="""dandelin/vilt-b32-finetuned-vqa""" )
_lowerCAmelCase = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
_lowerCAmelCase = """How many cats are there?"""
_lowerCAmelCase = vqa_pipeline(image=__snake_case , question=__snake_case , top_k=2 )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [{"""score""": 0.87_99, """answer""": """2"""}, {"""score""": 0.2_96, """answer""": """1"""}] )
_lowerCAmelCase = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [{"""score""": 0.87_99, """answer""": """2"""}, {"""score""": 0.2_96, """answer""": """1"""}] )
_lowerCAmelCase = vqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__snake_case , decimals=4 ) , [[{"""score""": 0.87_99, """answer""": """2"""}, {"""score""": 0.2_96, """answer""": """1"""}]] * 2 , )
@require_tf
@unittest.skip("""Visual question answering not implemented in TF""" )
def lowercase__ ( self : List[str] ) -> Union[str, Any]:
pass
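# Usage sketch (public `transformers` pipeline API; checkpoint and fixture path are the ones
# exercised in the slow test above). Kept commented so importing this module stays side-effect free:
# from transformers import pipeline
# vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
# vqa(image="./tests/fixtures/tests_samples/COCO/000000039769.png",
#     question="How many cats are there?", top_k=2)
# # -> [{"score": ~0.88, "answer": "2"}, {"score": ~0.30, "answer": "1"}]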
| 220 | 0 |
from ..utils import DummyObject, requires_backends
# NOTE: the original dummy class names (one per flax/transformers-backed object) are not
# recoverable from this fragment, so distinct placeholder names are used below.
class UpperCamelCase_ ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''flax''', '''transformers''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['''flax''', '''transformers'''])
    @classmethod
    def from_config( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['''flax''', '''transformers'''])
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['''flax''', '''transformers'''])
class UpperCamelCase_a ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''flax''', '''transformers''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['''flax''', '''transformers'''])
    @classmethod
    def from_config( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['''flax''', '''transformers'''])
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['''flax''', '''transformers'''])
class UpperCamelCase_b ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''flax''', '''transformers''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['''flax''', '''transformers'''])
    @classmethod
    def from_config( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['''flax''', '''transformers'''])
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['''flax''', '''transformers'''])
class UpperCamelCase_c ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''flax''', '''transformers''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['''flax''', '''transformers'''])
    @classmethod
    def from_config( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['''flax''', '''transformers'''])
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['''flax''', '''transformers'''])
| 14 | """simple docstring"""
def a_ ( a , b ):
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a == '1' and char_b == '1' ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
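# Worked example (values verified by hand; the function name `a_` is kept from this file):
# a_(25, 32) -> "0b000000"   (0b011001 AND 0b100000: no common set bits)
# a_(37, 50) -> "0b100000"   (0b100101 AND 0b110010: only the 32s bit overlaps)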
| 98 | 0 |
"""simple docstring"""
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
lowerCamelCase_ = get_logger()
lowerCamelCase_ = None
class UpperCamelCase_ (TensorFormatter[Mapping, "jax.Array", Mapping] ):
    def __init__( self , features=None , device=None , **jnp_array_kwargs ) -> None:
        super().__init__(features=features )
        import jax
        from jaxlib.xla_client import Device
        if isinstance(device , Device ):
            raise ValueError(
                f"""Expected {device} to be a `str` not {type(device )}, as `jaxlib.xla_extension.Device` """
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`." )
        self.device = device if isinstance(device , str ) else str(jax.devices()[0] )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys() ):
            logger.warning(
                f"""Device with string identifier {self.device} not listed among the available """
                f"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
                f"""device: {str(jax.devices()[0] )}.""" )
            self.device = str(jax.devices()[0] )
        self.jnp_array_kwargs = jnp_array_kwargs
    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax
        return {str(device ): device for device in jax.devices()}
    def _consolidate( self , column ):
        import jax
        import jax.numpy as jnp
        if isinstance(column , list ) and column:
            if all(
                isinstance(x , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
                return jnp.stack(column , axis=0 )
        return column
    def _tensorize( self , value ):
        import jax
        import jax.numpy as jnp
        if isinstance(value , (str, bytes, type(None )) ):
            return value
        elif isinstance(value , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()
        default_dtype = {}
        if isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(value , PIL.Image.Image ):
                value = np.asarray(value )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        with jax.default_device(DEVICE_MAPPING[self.device] ):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value , **{**default_dtype, **self.jnp_array_kwargs} )
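    # Dtype sketch (illustrative): with the x64 flag off (JAX's default), the branch above yields
    # 32-bit arrays, e.g. jnp.array([1, 2], dtype=jnp.int32); calling
    # jax.config.update("jax_enable_x64", True) switches integer columns to int64 instead.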
    def _recursive_tensorize( self , data_struct ):
        import jax
        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch
            if isinstance(data_struct , torch.Tensor ):
                return self._tensorize(data_struct.detach().cpu().numpy()[()] )
        if hasattr(data_struct , "__array__" ) and not isinstance(data_struct , jax.Array ):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct , np.ndarray ):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(data_struct , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        return self._tensorize(data_struct )
    def recursive_tensorize( self , data_struct : dict ):
        return map_nested(self._recursive_tensorize , data_struct , map_list=False )
    def format_row( self , pa_table : pa.Table ) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table )
        row = self.python_features_decoder.decode_row(row )
        return self.recursive_tensorize(row )
    def format_column( self , pa_table : pa.Table ) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table )
        column = self.python_features_decoder.decode_column(column , pa_table.column_names[0] )
        column = self.recursive_tensorize(column )
        column = self._consolidate(column )
        return column
    def format_batch( self , pa_table : pa.Table ) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table )
        batch = self.python_features_decoder.decode_batch(batch )
        batch = self.recursive_tensorize(batch )
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name] )
        return batch
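# Usage sketch (standard `datasets` API; dataset name illustrative). The formatter above is
# what backs `with_format("jax")`:
# from datasets import load_dataset
# ds = load_dataset("glue", "mrpc", split="train").with_format("jax")
# batch = ds[:4]  # numeric columns come back as jax.Array via format_batch above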
| 352 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class LongformerConfig(PretrainedConfig ):
    model_type = '''longformer'''
    def __init__( self , attention_window : Union[List[int], int] = 512 , sep_token_id : int = 2 , pad_token_id : int = 1 , bos_token_id : int = 0 , eos_token_id : int = 2 , vocab_size : int = 30_522 , hidden_size : int = 768 , num_hidden_layers : int = 12 , num_attention_heads : int = 12 , intermediate_size : int = 3_072 , hidden_act : str = "gelu" , hidden_dropout_prob : float = 0.1 , attention_probs_dropout_prob : float = 0.1 , max_position_embeddings : int = 512 , type_vocab_size : int = 2 , initializer_range : float = 0.0_2 , layer_norm_eps : float = 1e-12 , onnx_export : bool = False , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig ):
    def __init__( self , config : "PretrainedConfig" , task : str = "default" , patching_specs : "List[PatchingSpec]" = None ) -> None:
        super().__init__(config , task , patching_specs )
        config.onnx_export = True
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ] )
    @property
    def outputs( self ) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs
    @property
    def atol_for_validation( self ) -> float:
        return 1e-4
    @property
    def default_onnx_opset( self ) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset , 14 )
    def generate_dummy_inputs( self , tokenizer : "PreTrainedTokenizerBase" , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        import torch
        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"] )
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1
        return inputs
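# Export sketch (assumes the `transformers.onnx` flow these configs plug into; the model id is a
# real checkpoint, the output path illustrative):
# python -m transformers.onnx --model=allenai/longformer-base-4096 onnx/
# The zero/one global_attention_mask built in generate_dummy_inputs above makes the traced graph
# exercise both the local and the global attention code paths.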
| 253 | 0 |
'''simple docstring'''
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class snake_case ( unittest.TestCase ):
"""simple docstring"""
    mod_file = inspect.getfile(accelerate.test_utils )
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_cli.py"] )
    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"
    test_config_path = Path("tests/test_configs" )
@classmethod
def snake_case ( cls ):
"""simple docstring"""
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def snake_case ( cls ):
"""simple docstring"""
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
    def test_no_config( self ):
        """simple docstring"""
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
    def test_config_compatibility( self ):
        """simple docstring"""
        for config in sorted(self.test_config_path.glob("**/*.yaml" ) ):
            with self.subTest(config_file=config ):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config ), self.test_file_path] , env=os.environ.copy() )
    def test_accelerate_test( self ):
        """simple docstring"""
        execute_subprocess_async(["accelerate", "test"] , env=os.environ.copy() )
class snake_case ( unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = "test-tpu"
_lowerCamelCase = "us-central1-a"
_lowerCamelCase = "ls"
_lowerCamelCase = ["accelerate", "tpu-config"]
_lowerCamelCase = "cd /usr/share"
_lowerCamelCase = "tests/test_samples/test_command_file.sh"
_lowerCamelCase = "Running gcloud compute tpus tpu-vm ssh"
    def test_base( self ):
        """simple docstring"""
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"] , return_stdout=True , )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , output , )
    def test_base_backward_compatibility( self ):
        """simple docstring"""
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ] , return_stdout=True , )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , output , )
    def test_with_config_file( self ):
        """simple docstring"""
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"] , return_stdout=True )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , output , )
    def test_with_config_file_and_command( self ):
        """simple docstring"""
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"] , return_stdout=True , )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , output , )
    def test_with_config_file_and_multiple_command( self ):
        """simple docstring"""
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                "echo \"Hello World\"",
                "--debug",
            ] , return_stdout=True , )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all''' , output , )
    def test_with_config_file_and_command_file( self ):
        """simple docstring"""
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"] , return_stdout=True , )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , output , )
    def test_with_config_file_and_command_file_backward_compatibility( self ):
        """simple docstring"""
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ] , return_stdout=True , )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , output , )
    def test_accelerate_install( self ):
        """simple docstring"""
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"] , return_stdout=True , )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all''' , output , )
    def test_accelerate_install_version( self ):
        """simple docstring"""
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ] , return_stdout=True , )
        self.assertIn(
            f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all''' , output , )
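# CLI sketch (mirrors what the tests above assert; TPU name/zone are the test fixtures):
# accelerate tpu-config --config_file tests/test_configs/latest.yaml \
#   --command "ls" --tpu_zone us-central1-a --tpu_name test-tpu --debug
# With --debug the composed gcloud invocation is printed instead of executed.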
| 55 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    config_name: Optional[str] = field(
        default=None, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    cache_dir: Optional[str] = field(
        default=None, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''}, )
    model_revision: str = field(
        default='''main''', metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''}, )
    use_auth_token: bool = field(
        default=False, metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        }, )
@dataclass
class DataTrainingArguments:
    train_file: Optional[str] = field(default=None, metadata={'''help''': '''The input training data file (a text file).'''} )
    validation_file: Optional[str] = field(
        default=None, metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''}, )
    overwrite_cache: bool = field(
        default=False, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={'''help''': '''The number of processes to use for the preprocessing.'''}, )
    max_seq_length: Optional[int] = field(
        default=None, metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. If passed, sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        }, )
    pad_to_max_length: bool = field(
        default=False, metadata={
            '''help''': (
                '''Whether to pad all samples to the maximum sentence length. '''
                '''If False, will pad the samples dynamically when batching to the maximum length in the batch. More '''
                '''efficient on GPU but very bad for TPU.'''
            )
        }, )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        }, )
    def __post_init__( self ) -> None:
        '''simple docstring'''
        if self.train_file is not None:
            extension = self.train_file.split("." )[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split("." )[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    def __call__( self , features ):
        '''simple docstring'''
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name ) for feature in features]
        batch_size = len(features )
        num_choices = len(features[0]["input_ids"] )
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices )] for feature in features
        ]
        flattened_features = list(chain(*flattened_features ) )
        batch = self.tokenizer.pad(
            flattened_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
        # Un-flatten
        batch = {k: v.view(batch_size , num_choices , -1 ) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels , dtype=torch.int64 )
        return batch
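# Shape sketch (illustrative numbers): with batch_size=2 and num_choices=4, the collator above
# pads 8 flattened sequences, then views each padded tensor back to (2, 4, seq_len), so the
# model scores all four endings of an example in a single forward pass.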
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag" , model_args , data_args )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split("." )[-1]
        raw_datasets = load_dataset(
            extension , data_files=data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
A__ = [f"""ending{i}""" for i in range(4 )]
A__ = "sent1"
A__ = "sent2"
    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 10_24:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`." )
            max_seq_length = 10_24
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
                f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
        max_seq_length = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
    def preprocess_function(examples ):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(question_headers )
        ]
        # Flatten out
        first_sentences = list(chain(*first_sentences ) )
        second_sentences = list(chain(*second_sentences ) )
        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences , second_sentences , truncation=True , max_length=max_seq_length , padding="max_length" if data_args.pad_to_max_length else False , )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0 , len(v ) , 4 )] for k, v in tokenized_examples.items()}
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset" )
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset ) , data_args.max_train_samples )
            train_dataset = train_dataset.select(range(max_train_samples ) )
        with training_args.main_process_first(desc="train dataset map pre-processing" ):
            train_dataset = train_dataset.map(
                preprocess_function , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset" )
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset ) , data_args.max_eval_samples )
            eval_dataset = eval_dataset.select(range(max_eval_samples ) )
        with training_args.main_process_first(desc="validation dataset map pre-processing" ):
            eval_dataset = eval_dataset.map(
                preprocess_function , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer , pad_to_multiple_of=8 if training_args.fp16 else None )
    )
    # Metric
    def compute_metrics(eval_predictions ):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions , axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.float32 ).mean().item()}
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=tokenizer , data_collator=data_collator , compute_metrics=compute_metrics , )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics["train_samples"] = min(max_train_samples , len(train_dataset ) )
        trainer.log_metrics("train" , metrics )
        trainer.save_metrics("train" , metrics )
        trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics["eval_samples"] = min(max_eval_samples , len(eval_dataset ) )
        trainer.log_metrics("eval" , metrics )
        trainer.save_metrics("eval" , metrics )
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
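# Example invocation (standard pattern for this HF example script; model name and paths illustrative):
# python run_swag.py \
#   --model_name_or_path bert-base-uncased \
#   --do_train --do_eval \
#   --per_device_train_batch_size 16 \
#   --learning_rate 5e-5 --num_train_epochs 3 \
#   --output_dir /tmp/swag_output --overwrite_output_dir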
| 247 | 0 |
'''simple docstring'''
from manim import *
class lowerCAmelCase_ ( Scene ):
    '''simple docstring'''
    def construct( self ) -> None:
        '''simple docstring'''
A: List[str] = Rectangle(height=0.5 , width=0.5 )
A: str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
A: List[str] = Rectangle(height=0.25 , width=0.25 )
A: List[Any] = [mem.copy() for i in range(6 )]
A: int = [mem.copy() for i in range(6 )]
A: Optional[Any] = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
A: Any = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
A: int = VGroup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
A: Union[str, Any] = Text('''CPU''' , font_size=24 )
A: str = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(SCREAMING_SNAKE_CASE_ )
A: List[str] = [mem.copy() for i in range(4 )]
A: Union[str, Any] = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
A: Any = Text('''GPU''' , font_size=24 )
A: Union[str, Any] = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ )
gpu.move_to([-1, -1, 0] )
self.add(SCREAMING_SNAKE_CASE_ )
A: Dict = [mem.copy() for i in range(6 )]
A: Optional[int] = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
A: int = Text('''Model''' , font_size=24 )
A: str = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ )
model.move_to([3, -1.0, 0] )
self.add(SCREAMING_SNAKE_CASE_ )
A: Any = []
A: str = []
for i, rect in enumerate(SCREAMING_SNAKE_CASE_ ):
A: Optional[int] = fill.copy().set_fill(SCREAMING_SNAKE_CASE_ , opacity=0.8 )
target.move_to(SCREAMING_SNAKE_CASE_ )
model_arr.append(SCREAMING_SNAKE_CASE_ )
A: str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(SCREAMING_SNAKE_CASE_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(SCREAMING_SNAKE_CASE_ )
self.add(*SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ )
A: int = [meta_mem.copy() for i in range(6 )]
A: Dict = [meta_mem.copy() for i in range(6 )]
A: int = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
A: Optional[int] = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
A: List[str] = VGroup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
A: List[Any] = Text('''Disk''' , font_size=24 )
A: Union[str, Any] = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ )
disk.move_to([-4, -1.25, 0] )
self.add(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A: List[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
A: Any = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A: Optional[Any] = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(SCREAMING_SNAKE_CASE_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(SCREAMING_SNAKE_CASE_ )
A: str = MarkupText(
f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(SCREAMING_SNAKE_CASE_ ) )
A: Union[str, Any] = Square(0.3 )
input.set_fill(SCREAMING_SNAKE_CASE_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , SCREAMING_SNAKE_CASE_ , buff=0.5 )
self.play(Write(SCREAMING_SNAKE_CASE_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=SCREAMING_SNAKE_CASE_ , buff=0.02 )
self.play(MoveToTarget(SCREAMING_SNAKE_CASE_ ) )
self.play(FadeOut(SCREAMING_SNAKE_CASE_ ) )
A: Dict = Arrow(start=SCREAMING_SNAKE_CASE_ , end=SCREAMING_SNAKE_CASE_ , color=SCREAMING_SNAKE_CASE_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , SCREAMING_SNAKE_CASE_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
A: Optional[int] = MarkupText(
f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(SCREAMING_SNAKE_CASE_ , run_time=3 ) )
A: Union[str, Any] = {'''run_time''': 1, '''fade_in''': True, '''fade_out''': True, '''buff''': 0.02}
self.play(
Write(SCREAMING_SNAKE_CASE_ ) , Circumscribe(model_arr[0] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , Circumscribe(model_cpu_arr[0] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , Circumscribe(gpu_rect[0] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
A: Any = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , SCREAMING_SNAKE_CASE_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
A: Tuple = AnimationGroup(
FadeOut(SCREAMING_SNAKE_CASE_ , run_time=0.5 ) , MoveToTarget(SCREAMING_SNAKE_CASE_ , run_time=0.5 ) , FadeIn(SCREAMING_SNAKE_CASE_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(SCREAMING_SNAKE_CASE_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
A: Optional[Any] = 0.7
self.play(
Circumscribe(model_arr[i] , **SCREAMING_SNAKE_CASE_ ) , Circumscribe(cpu_left_col_base[i] , **SCREAMING_SNAKE_CASE_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , Circumscribe(gpu_rect[0] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , Circumscribe(model_arr[i + 1] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , Circumscribe(cpu_left_col_base[-1] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , Circumscribe(gpu_rect[0] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
A: Union[str, Any] = a_c
A: Optional[int] = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(SCREAMING_SNAKE_CASE_ ) , FadeOut(SCREAMING_SNAKE_CASE_ , run_time=0.5 ) , )
A: Tuple = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(SCREAMING_SNAKE_CASE_ , run_time=3 ) , MoveToTarget(SCREAMING_SNAKE_CASE_ ) )
self.wait()
| 334 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 512,
'''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-question_encoder-single-nq-base''': 512,
'''facebook/dpr-question_encoder-multiset-base''': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-reader-single-nq-base''': 512,
'''facebook/dpr-reader-multiset-base''': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    '''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
DPRReaderOutput = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
CUSTOM_DPR_READER_DOCSTRING = R'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class CustomDPRReaderTokenizerMixin:
    '''simple docstring'''
    def __call__( self , questions , titles : Optional[str] = None , texts : Optional[str] = None , padding : Union[bool, str] = False , truncation : Union[bool, str] = False , max_length : Optional[int] = None , return_tensors : Optional[Union[str, TensorType]] = None , return_attention_mask : Optional[bool] = None , **kwargs , ) -> BatchEncoding:
        '''simple docstring'''
        if titles is None and texts is None:
            return super().__call__(
                questions , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions , text_pair , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        titles = titles if not isinstance(titles , str ) else [titles]
        texts = texts if not isinstance(texts , str ) else [texts]
        n_passages = len(titles )
        questions = questions if not isinstance(questions , str ) else [questions] * n_passages
        assert len(titles ) == len(
            texts ), f"""There should be as many titles than texts but got {len(titles )} titles and {len(texts )} texts."""
        encoded_question_and_titles = super().__call__(questions , titles , padding=False , truncation=False )['''input_ids''']
        encoded_texts = super().__call__(texts , add_special_tokens=False , padding=False , truncation=False )['''input_ids''']
        encoded_inputs = {
            '''input_ids''': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles , encoded_texts )
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
            encoded_inputs['''attention_mask'''] = attention_mask
        return self.pad(encoded_inputs , padding=padding , max_length=max_length , return_tensors=return_tensors )
    def decode_best_spans( self , reader_input : BatchEncoding , reader_output : DPRReaderOutput , num_spans : int = 16 , max_answer_length : int = 64 , num_spans_per_passage : int = 4 , ) -> List[DPRSpanPrediction]:
        '''simple docstring'''
        input_ids = reader_input['''input_ids''']
        start_logits , end_logits , relevance_logits = reader_output[:3]
        n_docs = len(relevance_logits )
        sorted_docs = sorted(range(n_docs ) , reverse=True , key=relevance_logits.__getitem__ )
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id , 2 ) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id )
            else:
                sequence_len = len(sequence_ids )
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=max_answer_length , top_spans=num_spans_per_passage , )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=doc_id , start_index=start_index , end_index=end_index , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
            if len(nbest_spans_predictions ) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        '''simple docstring'''
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
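    # Illustrative walk-through of the selection above (not part of the original
    # code): with start_logits [0.1, 2.0, 0.3] and end_logits [0.2, 0.1, 1.5],
    # the best span is (1, 2) with score 3.5; (1, 1) and (2, 2) are then skipped
    # because they overlap it, so the next span kept is the non-overlapping (0, 0).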
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
| 334 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    """Build a YolosConfig matching the requested architecture variant."""
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False):
    """Split the fused qkv projections of each layer into separate query/key/value entries."""
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
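# Shape note (illustrative, not from the original script): timm stores the three
# attention projections fused as qkv.weight with shape (3 * hidden_size, hidden_size),
# so the consecutive hidden_size-row slices above are the query, key and value weights.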
def rename_key(name: str) -> str:
    """Map an original YOLOS parameter name to the HF Transformers naming scheme."""
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name
def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    """Rename all keys and split any remaining fused qkv tensors."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_img() -> Image.Image:
    """Download the standard COCO test image used to verify conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(
    yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
):
    """Copy/paste/tweak the original YOLOS checkpoint weights into our YOLOS structure."""
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }
        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--yolos_name""",
default="""yolos_s_200_pre""",
type=str,
help=(
"""Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\',"""
""" \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'."""
),
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original state dict (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__UpperCAmelCase = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
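    # Hypothetical example invocation (script name and paths are illustrative only):
    #   python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre \
    #       --checkpoint_path ./yolos_s_200_pre.pth --pytorch_dump_folder_path ./yolos-small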
| 323 |
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}
def digit_factorial_sum(number: int) -> int:
    """Return the sum of the factorials of the digits of ``number``."""
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))
def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    """Count the chains below ``number_limit`` with exactly ``chain_length`` non-repeating terms."""
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError("Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{solution()}''')
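    # Worked example (Project Euler 74 context, assumed): the chain starting at 169
    # loops back onto itself, 169 -> 363601 -> 1454 -> 169, so it has exactly
    # three distinct terms.
    assert digit_factorial_sum(169) == 363601
    assert digit_factorial_sum(363601) == 1454
    assert digit_factorial_sum(1454) == 169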
| 279 | 0 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class DataTrainingArguments:
    '''simple docstring'''

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    '''simple docstring'''

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )
@dataclass
class CustomTrainingArguments(TrainingArguments):
    '''simple docstring'''

    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # adapt config
    config.update(
        {
            "mask_ratio": model_args.mask_ratio,
            "norm_pix_loss": model_args.norm_pix_loss,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
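    # Hypothetical example invocation (flags and paths are illustrative only):
    #   python run_mae.py --dataset_name cifar10 --output_dir ./vit-mae-demo \
    #       --do_train --do_eval --base_learning_rate 1.5e-4 --remove_unused_columns False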
| 61 |
'''simple docstring'''
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Plain recursion (exponential time): the square ending at a cell has side
    1 + min(right, diagonal, down) whenever the cell holds a 1."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Same recursion, memoized with a dp_array (O(rows * cols) time)."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative DP over a (rows + 1) x (cols + 1) table."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Same DP using two row buffers instead of the full table."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # swap the buffers so ``next_row`` holds the row just computed
        current_row, next_row = next_row, current_row
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
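    # Quick cross-check (illustrative): both iterative variants agree on a 3x3
    # example whose largest all-ones square has side 2.
    example = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
    assert largest_square_area_in_matrix_bottom_up(3, 3, example) == 2
    assert largest_square_area_in_matrix_bottom_up_space_optimization(3, 3, example) == 2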
| 61 | 1 |
import math
def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square using math.sqrt; simple, but subject
    to floating point error for very large inputs."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square using binary search, with no floating point."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
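    # Usage sketch (illustrative): both variants agree, but the binary-search
    # version avoids floating point entirely, which matters for very large inputs.
    assert perfect_square(9) and perfect_square_binary_search(9)
    assert not perfect_square(10) and not perfect_square_binary_search(10)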
| 87 |
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
    Text data.
    Second line of data."""
FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file_abs = str(Path(text_file).resolve())
    assert cached_path(text_file_abs) == text_file
    # relative path
    text_file_rel = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file_rel) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
| 344 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
| 368 |
'''simple docstring'''
class Node:
    # BST node holding a value and two children.
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive traversal
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
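    # Tree sort is O(n log n) on average but degrades to O(n^2) on already sorted
    # input, since the BST then degenerates into a linked list.
    assert tree_sort([10, 1, 3, 2, 9, 14, 13]) == [1, 2, 3, 9, 10, 13, 14]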
| 4 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput(BaseOutput):
    '''simple docstring'''

    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    '''simple docstring'''
    @register_to_config
    def __init__(
        self,
        sample_size: int = 65_536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: Tuple[str] = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        '''simple docstring'''
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )
    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        '''simple docstring'''
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
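# Minimal usage sketch (illustrative; assumes the diffusers package layout):
#   model = UNet1DModel(in_channels=2, out_channels=2, block_out_channels=(32, 32, 64))
#   out = model(torch.randn(1, 2, 16384), timestep=10).sample  # same shape as the input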
| 291 |
"""simple docstring"""
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Transform a snake_case string to camelCase (or PascalCase when ``use_pascal`` is set)."""
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")

    start_index = 0 if use_pascal else 1

    words_to_capitalize = words[start_index:]

    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]

    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
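    # Usage sketch (illustrative): camelCase by default, PascalCase with use_pascal=True.
    assert snake_to_camel_case("some_random_string") == "someRandomString"
    assert snake_to_camel_case("some_random_string", use_pascal=True) == "SomeRandomString"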
| 291 | 1 |
ROMAN = [
(1000, 'M'),
(900, 'CM'),
(500, 'D'),
(400, 'CD'),
(100, 'C'),
(90, 'XC'),
(50, 'L'),
(40, 'XL'),
(10, 'X'),
(9, 'IX'),
(5, 'V'),
(4, 'IV'),
(1, 'I'),
]
def roman_to_int(roman: str) -> int:
    """Convert a roman numeral string to an integer."""
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """Convert an integer to a roman numeral string."""
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
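    # Round-trip sketch (illustrative): "MMXXIV" <-> 2024.
    assert roman_to_int("MMXXIV") == 2024
    assert int_to_roman(2024) == "MMXXIV"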
| 247 |
from manim import *
class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1), Create(cpu_right_col, run_time=1), Create(gpu_rect, run_time=1), )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.", font_size=24, )

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model", font_size=18, )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))

        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
| 247 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
| 50 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)
    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
SCREAMING_SNAKE_CASE_: Tuple ={"""input_ids""": [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=SCREAMING_SNAKE_CASE_,
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )
    def test_tokenizer_integration_separate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(expected_src_ids, src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(expected_target_ids, target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
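
    # Note (illustrative): MarianTokenizer keeps separate SentencePiece models
    # for the source and target languages, which is why setUp() copies the
    # same fixture to both source.spm and target.spm.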
| 173 | 0 |
# Numeric base of the polynomial rolling hash (size of the character set)
alphabet_size = 256
# Modulus to keep the hash values bounded
modulus = 1000003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if ``pattern`` occurs in ``text`` (Rabin-Karp search)."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
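

def _rolling_hash_demo() -> None:
    # Worked example (illustrative sketch, not part of the original module):
    # dropping the leading character of a window and appending the next one
    # must give the same hash as hashing the new window from scratch.
    text, width = "abcd", 3
    window_hash = 0
    leading_power = 1
    for i in range(width):
        window_hash = (ord(text[i]) + window_hash * alphabet_size) % modulus
        if i < width - 1:
            leading_power = (leading_power * alphabet_size) % modulus
    rolled = (
        (window_hash - ord(text[0]) * leading_power) * alphabet_size
        + ord(text[width])
    ) % modulus
    fresh = 0
    for char in text[1 : width + 1]:
        fresh = (ord(char) + fresh * alphabet_size) % modulus
    assert rolled == fresh


if __name__ == "__main__":
    _rolling_hash_demo()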
| 357 |
"""simple docstring"""
def solution() -> int:
    """Project Euler 40: product of the digits d(1) * d(10) * d(100) * ... *
    d(1000000) of the Champernowne constant 0.123456789101112..."""
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
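

# Illustration (not in the original file): the concatenation begins
# "12345678910111213...", so d(1) = 1, d(10) = 1 (first digit of 10) and
# d(100) = 5 (first digit of 55).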
if __name__ == "__main__":
print(solution())
| 316 | 0 |
"""simple docstring"""
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo = {}


def next_term(a_i, k, i, n):
    """
    Calculates and updates a_i in place to either the n-th term or the
    smallest term for which c > 10^k, when terms are written in the form
        a(i) = b * 10^k + c.
    Returns the difference added and the number of terms jumped.
    """
    # ds_b - digitsum(b)
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            jumps = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    """Same as next_term(a_i, k, i, n) but computes terms sequentially,
    without memoizing results."""
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    """Adds addend to the digit array ``digits``, starting at index k."""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    """Returns the n-th term of the sequence a(i+1) = a(i) + digitsum(a(i))
    with a(1) = 1, stored as a little-endian list of digits."""
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
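

def _sequence_sanity_check() -> None:
    # Illustrative sanity check, not part of the original solution: the
    # sequence starts 1, 2, 4, 8, 16, 23, ... because each term adds the
    # digit sum of the previous term.
    expected = [1, 2, 4, 8, 16, 23, 28, 38, 49, 62]
    term = 1
    computed = [term]
    while len(computed) < len(expected):
        term += sum(int(digit) for digit in str(term))
        computed.append(term)
    assert computed == expected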
if __name__ == "__main__":
    print(f'{solution() = }')
| 320 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of a [`UniSpeechModel`]."""

    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
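
    # Usage sketch (illustrative, not part of the original file):
    #   config = UniSpeechConfig()          # default hyperparameters
    #   config.num_feat_extract_layers      # -> 7 conv feature-extractor layers
    #   config.inputs_to_logits_ratio       # -> 320, i.e. 5 * 2**6 samples per logit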
| 320 | 1 |
'''simple docstring'''
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )
if __name__ == "__main__":
print(solution())
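
# For reference (illustrative): the only numbers equal to the sum of the
# fifth powers of their digits are 4150, 4151, 54748, 92727, 93084 and
# 194979, e.g. 4150 == 4**5 + 1**5 + 5**5 + 0**5, so solution() returns
# their sum, 443839.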
| 251 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(
    arrival_time: list[int], burst_time: list[int], no_of_processes: int
) -> list[int]:
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    ready_process: list[int] = []
    completed = 0
    total_time = 0

    # While processes are not completed, a process whose arrival time has
    # passed and which has remaining execution time is put into ready_process.
    # The shortest process in ready_process, target_process, is executed next.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(
    burst_time: list[int], no_of_processes: int, waiting_time: list[int]
) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print('[TEST CASE 01]')
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print('PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time')
for i, process_id in enumerate(list(range(1, 5))):
print(
f"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
f"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(f"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(f"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 251 | 1 |
'''simple docstring'''
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int=1_3 , _lowerCAmelCase : Optional[Any]=3_0 , _lowerCAmelCase : Tuple=2 , _lowerCAmelCase : Union[str, Any]=3 , _lowerCAmelCase : Any=True , _lowerCAmelCase : Any=True , _lowerCAmelCase : Any=3_2 , _lowerCAmelCase : List[str]=5 , _lowerCAmelCase : Optional[Any]=4 , _lowerCAmelCase : Any=3_7 , _lowerCAmelCase : str="gelu" , _lowerCAmelCase : int=0.1 , _lowerCAmelCase : int=0.1 , _lowerCAmelCase : str=1_0 , _lowerCAmelCase : Union[str, Any]=0.02 , _lowerCAmelCase : Any=3 , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Union[str, Any]=2 , ):
'''simple docstring'''
__lowercase =parent
__lowercase =batch_size
__lowercase =image_size
__lowercase =patch_size
__lowercase =num_channels
__lowercase =is_training
__lowercase =use_labels
__lowercase =hidden_size
__lowercase =num_hidden_layers
__lowercase =num_attention_heads
__lowercase =intermediate_size
__lowercase =hidden_act
__lowercase =hidden_dropout_prob
__lowercase =attention_probs_dropout_prob
__lowercase =type_sequence_label_size
__lowercase =initializer_range
__lowercase =scope
__lowercase =encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
__lowercase =(image_size // patch_size) ** 2
__lowercase =num_patches + 2
def __lowerCamelCase ( self : Any):
'''simple docstring'''
__lowercase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__lowercase =None
if self.use_labels:
__lowercase =ids_tensor([self.batch_size] , self.type_sequence_label_size)
__lowercase =self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __lowerCamelCase ( self : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : int , _lowerCAmelCase : Tuple):
'''simple docstring'''
__lowercase =DeiTModel(config=_lowerCAmelCase)
model.to(_lowerCAmelCase)
model.eval()
__lowercase =model(_lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __lowerCamelCase ( self : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Any):
'''simple docstring'''
__lowercase =DeiTForMaskedImageModeling(config=_lowerCAmelCase)
model.to(_lowerCAmelCase)
model.eval()
__lowercase =model(_lowerCAmelCase)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
__lowercase =1
__lowercase =DeiTForMaskedImageModeling(_lowerCAmelCase)
model.to(_lowerCAmelCase)
model.eval()
__lowercase =floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
__lowercase =model(_lowerCAmelCase)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def __lowerCamelCase ( self : List[str] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Tuple):
'''simple docstring'''
__lowercase =self.type_sequence_label_size
__lowercase =DeiTForImageClassification(_lowerCAmelCase)
model.to(_lowerCAmelCase)
model.eval()
__lowercase =model(_lowerCAmelCase , labels=_lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
__lowercase =1
__lowercase =DeiTForImageClassification(_lowerCAmelCase)
model.to(_lowerCAmelCase)
model.eval()
__lowercase =floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
__lowercase =model(_lowerCAmelCase , labels=_lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
__lowercase =self.prepare_config_and_inputs()
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) =config_and_inputs
__lowercase ={'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( A , A , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{
"""feature-extraction""": DeiTModel,
"""image-classification""": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def __lowerCamelCase ( self : str):
'''simple docstring'''
__lowercase =DeiTModelTester(self)
__lowercase =ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=3_7)
def __lowerCamelCase ( self : Any):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds')
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
pass
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__lowercase , __lowercase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase =model_class(_lowerCAmelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
__lowercase =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear))
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
__lowercase , __lowercase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase =model_class(_lowerCAmelCase)
__lowercase =inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase =[*signature.parameters.keys()]
__lowercase =['pixel_values']
self.assertListEqual(arg_names[:1] , _lowerCAmelCase)
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
__lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase)
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCAmelCase)
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
__lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase)
def __lowerCamelCase ( self : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple=False):
'''simple docstring'''
__lowercase =super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase)
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __lowerCamelCase ( self : str):
'''simple docstring'''
if not self.model_tester.is_training:
return
__lowercase , __lowercase =self.model_tester.prepare_config_and_inputs_for_common()
__lowercase =True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(_lowerCAmelCase)
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
__lowercase =model_class(_lowerCAmelCase)
model.to(_lowerCAmelCase)
model.train()
__lowercase =self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase)
__lowercase =model(**_lowerCAmelCase).loss
loss.backward()
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__lowercase , __lowercase =self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__lowercase =False
__lowercase =True
for model_class in self.all_model_classes:
if model_class in get_values(_lowerCAmelCase) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
__lowercase =model_class(_lowerCAmelCase)
model.gradient_checkpointing_enable()
model.to(_lowerCAmelCase)
model.train()
__lowercase =self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase)
__lowercase =model(**_lowerCAmelCase).loss
loss.backward()
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
__lowercase , __lowercase =self.model_tester.prepare_config_and_inputs_for_common()
__lowercase =[
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_lowerCAmelCase),
*get_values(_lowerCAmelCase),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f"""Testing {model_class} with {problem_type['title']}"""):
__lowercase =problem_type['title']
__lowercase =problem_type['num_labels']
__lowercase =model_class(_lowerCAmelCase)
model.to(_lowerCAmelCase)
model.train()
__lowercase =self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase)
if problem_type["num_labels"] > 1:
__lowercase =inputs['labels'].unsqueeze(1).repeat(1 , problem_type['num_labels'])
__lowercase =inputs['labels'].to(problem_type['dtype'])
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_lowerCAmelCase) as warning_list:
__lowercase =model(**_lowerCAmelCase).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message):
raise ValueError(
f"""Something is going wrong in the regression problem: intercepted {w.message}""")
loss.backward()
@slow
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase =DeiTModel.from_pretrained(_lowerCAmelCase)
self.assertIsNotNone(_lowerCAmelCase)
def prepare_img():
    # We will verify our results on an image of cute cats
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __lowerCamelCase ( self : List[Any]):
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224')
if is_vision_available()
else None
)
@slow
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__lowercase =DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224').to(
_lowerCAmelCase)
__lowercase =self.default_image_processor
__lowercase =prepare_img()
__lowercase =image_processor(images=_lowerCAmelCase , return_tensors='pt').to(_lowerCAmelCase)
# forward pass
with torch.no_grad():
__lowercase =model(**_lowerCAmelCase)
# verify the logits
__lowercase =torch.Size((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , _lowerCAmelCase)
__lowercase =torch.tensor([-1.0266, 0.1912, -1.2861]).to(_lowerCAmelCase)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
__lowercase =DeiTModel.from_pretrained(
'facebook/deit-base-distilled-patch16-224' , torch_dtype=torch.floataa , device_map='auto')
__lowercase =self.default_image_processor
__lowercase =prepare_img()
__lowercase =image_processor(images=_lowerCAmelCase , return_tensors='pt')
__lowercase =inputs.pixel_values.to(_lowerCAmelCase)
# forward pass to make sure inference works in fp16
with torch.no_grad():
__lowercase =model(_lowerCAmelCase)
| 166 |
'''simple docstring'''
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(
        labels
    ), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
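

# Minimal usage sketch (illustrative, requires scikit-learn):
#   import numpy as np
#   preds = np.array([1, 0, 1, 1])
#   labels = np.array([1, 0, 0, 1])
#   glue_compute_metrics("mrpc", preds, labels)
#   # -> {"acc": 0.75, "f1": 0.8, "acc_and_f1": 0.775}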
| 166 | 1 |
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """Numerically computes the gamma function via its integral definition."""
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
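
# Example (illustrative): gamma(5) integrates x**4 * exp(-x) over [0, inf)
# and returns approximately 24.0, matching 4! for integer arguments.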
| 360 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")


class DoubleLinkedListNode(Generic[T, U]):
    """Double Linked List Node built specifically for LRU Cache."""

    def __init__(self, key: T | None, val: U | None):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    """Double Linked List built specifically for LRU Cache."""

    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """Adds the given node at the tail of the list (just before rear)."""
        previous = self.rear.prev

        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None

        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(
        self, node: DoubleLinkedListNode[T, U]
    ) -> DoubleLinkedListNode[T, U] | None:
        """Removes and returns the given node; returns None if the node is
        not currently linked into the list."""
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    """LRU Cache that stores up to a given capacity of key/value pairs."""

    # class variable mapping decorated functions to their cache instances
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        """Returns the value for the key and refreshes its recency; returns
        None if the key is not present."""
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node

            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        """Sets the value for the key, evicting the oldest entry if needed."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next

                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list

                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(
        cls, size: int = 128
    ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        """Decorator version of the LRU Cache for unary functions."""

        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)

                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010

            return cache_decorator_wrapper

        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
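

# Usage sketch (illustrative): the class-level decorator memoizes a unary
# function with LRU eviction.
#
#   @LRUCache.decorator(100)
#   def fib(num: int) -> int:
#       if num in (1, 2):
#           return 1
#       return fib(num - 1) + fib(num - 2)
#
#   fib(30)           # -> 832040
#   fib.cache_info()  # -> CacheInfo(hits=..., misses=..., capacity=100, ...)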
| 59 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__a = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 145 |
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
__a = logging.get_logger(__name__) # pylint: disable=invalid-name
class A__ ( UpperCamelCase ):
"""simple docstring"""
def __init__( self : List[Any] , lowerCAmelCase__ : CLIPSegForImageSegmentation , lowerCAmelCase__ : CLIPSegProcessor , lowerCAmelCase__ : AutoencoderKL , lowerCAmelCase__ : CLIPTextModel , lowerCAmelCase__ : CLIPTokenizer , lowerCAmelCase__ : UNetaDConditionModel , lowerCAmelCase__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCAmelCase__ : StableDiffusionSafetyChecker , lowerCAmelCase__ : CLIPImageProcessor , ) -> Dict:
"""simple docstring"""
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
_UpperCAmelCase : str = (
F"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
F""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , lowerCAmelCase__ , standard_warn=lowerCAmelCase__ )
_UpperCAmelCase : Any = dict(scheduler.config )
_UpperCAmelCase : Tuple = 1
_UpperCAmelCase : Optional[Any] = FrozenDict(lowerCAmelCase__ )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
_UpperCAmelCase : Union[str, Any] = (
F"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , lowerCAmelCase__ , standard_warn=lowerCAmelCase__ )
_UpperCAmelCase : List[str] = dict(scheduler.config )
_UpperCAmelCase : Optional[Any] = True
_UpperCAmelCase : Dict = FrozenDict(lowerCAmelCase__ )
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
segmentation_model=lowerCAmelCase__ , segmentation_processor=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , )
def _lowerCAmelCase ( self : str , lowerCAmelCase__ : Optional[Union[str, int]] = "auto" ) -> Optional[int]:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_UpperCAmelCase : Tuple = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCAmelCase__ )
def _lowerCAmelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
self.enable_attention_slicing(lowerCAmelCase__ )
def _lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_UpperCAmelCase : Dict = torch.device("cuda" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(lowerCAmelCase__ , lowerCAmelCase__ )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCAmelCase__ , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self : Optional[int] , lowerCAmelCase__ : Union[str, List[str]] , lowerCAmelCase__ : Union[torch.FloatTensor, PIL.Image.Image] , lowerCAmelCase__ : str , lowerCAmelCase__ : int = 5_1_2 , lowerCAmelCase__ : int = 5_1_2 , lowerCAmelCase__ : int = 5_0 , lowerCAmelCase__ : float = 7.5 , lowerCAmelCase__ : Optional[Union[str, List[str]]] = None , lowerCAmelCase__ : Optional[int] = 1 , lowerCAmelCase__ : float = 0.0 , lowerCAmelCase__ : Optional[torch.Generator] = None , lowerCAmelCase__ : Optional[torch.FloatTensor] = None , lowerCAmelCase__ : Optional[str] = "pil" , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase__ : int = 1 , **lowerCAmelCase__ : Any , ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : List[str] = self.segmentation_processor(
text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
_UpperCAmelCase : List[Any] = self.segmentation_model(**lowerCAmelCase__ )
_UpperCAmelCase : Any = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
_UpperCAmelCase : str = self.numpy_to_pil(lowerCAmelCase__ )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
_UpperCAmelCase : Union[str, Any] = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , height=lowerCAmelCase__ , width=lowerCAmelCase__ , num_inference_steps=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , latents=lowerCAmelCase__ , output_type=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=lowerCAmelCase__ , )
| 145 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __a( unittest.TestCase ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=7 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=18 ,_SCREAMING_SNAKE_CASE=30 ,_SCREAMING_SNAKE_CASE=400 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=True ,) -> Dict:
UpperCAmelCase_ : Union[str, Any] = size if size is not None else {'''shortest_edge''': 20}
UpperCAmelCase_ : Any = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
UpperCAmelCase_ : Any = parent
UpperCAmelCase_ : Dict = batch_size
UpperCAmelCase_ : List[str] = num_channels
UpperCAmelCase_ : Union[str, Any] = image_size
UpperCAmelCase_ : Dict = min_resolution
UpperCAmelCase_ : str = max_resolution
UpperCAmelCase_ : List[Any] = do_resize
UpperCAmelCase_ : List[Any] = size
UpperCAmelCase_ : str = do_center_crop
UpperCAmelCase_ : str = crop_size
UpperCAmelCase_ : Any = do_flip_channel_order
def a__ ( self ) -> Tuple:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class __a( _a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = MobileViTImageProcessor if is_vision_available() else None
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : int = MobileViTImageProcessingTester(self )
@property
def a__ ( self ) -> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ) -> Any:
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''do_resize''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''size''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''do_center_crop''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''center_crop''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''do_flip_channel_order''' ) )
def a__ ( self ) -> Any:
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size ,{'''height''': 18, '''width''': 18} )
UpperCAmelCase_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size ,{'''height''': 84, '''width''': 84} )
def a__ ( self ) -> str:
pass
def a__ ( self ) -> Optional[int]:
# Initialize image_processing
UpperCAmelCase_ : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,Image.Image )
# Test not batched input
UpperCAmelCase_ : str = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
UpperCAmelCase_ : Any = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
def a__ ( self ) -> Optional[Any]:
# Initialize image_processing
UpperCAmelCase_ : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : str = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE ,numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,np.ndarray )
# Test not batched input
UpperCAmelCase_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
UpperCAmelCase_ : List[str] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
def a__ ( self ) -> Optional[Any]:
# Initialize image_processing
UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE ,torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,torch.Tensor )
# Test not batched input
UpperCAmelCase_ : List[str] = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
UpperCAmelCase_ : Tuple = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,) | 235 |
def euclidean_distance_sqr(point1, point2):
    """Return the squared Euclidean distance between two 2-D points."""
    return (point2[0] - point1[0]) ** 2 + (point2[1] - point1[1]) ** 2


def column_based_sort(array, column=0):
    """Sort the points by the given coordinate (column 0 = x, column 1 = y)."""
    return sorted(array, key=lambda point: point[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """Brute force: smallest squared distance among the first `points_counts` points."""
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """In the strip, each point only needs comparing with its 6 neighbours in y-order."""
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    """Divide and conquer: O(n log n) closest pair, given x- and y-sorted copies of the points."""
    # base case: brute force small inputs
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)

    # points within `closest_pair_dis` of the dividing vertical line
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)

    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    """Return the Euclidean distance of the closest pair among `points`."""
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5
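
# A small verification sketch (not part of the original module): cross-check the
# divide-and-conquer result against a direct O(n^2) scan. The helper name below
# is hypothetical.
def _brute_force_closest_pair(points):
    # Direct pairwise scan over all point pairs; handy for testing the recursion.
    return min(
        euclidean_distance_sqr(p, q) for i, p in enumerate(points) for q in points[i + 1 :]
    ) ** 0.5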
if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print('Distance:', closest_pair_of_points(points, len(points))) | 235 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_jukebox''': [
'''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''JukeboxConfig''',
'''JukeboxPriorConfig''',
'''JukeboxVQVAEConfig''',
],
'''tokenization_jukebox''': ['''JukeboxTokenizer'''],
}
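
# The guarded block below follows the library's lazy-import convention: the
# torch-only modeling classes are added to `_import_structure` only when torch
# is importable, so importing the package stays cheap without torch.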
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
'''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''JukeboxModel''',
'''JukeboxPreTrainedModel''',
'''JukeboxVQVAE''',
'''JukeboxPrior''',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 89 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
        'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    r"""
    Configuration shared by the REALM embedder, scorer, reader and open-QA
    model variants.
    """

    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
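
# A minimal usage sketch (assuming the standard `transformers` API; the
# attribute defaults are the ones restored above):
#
#     config = RealmConfig(num_candidates=4)
#     assert config.retriever_proj_size == 128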
| 2 | 0 |
"""simple docstring"""
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    """
    Utility class for CLIP embeddings: combines the image and text embeddings
    into a format usable by the unCLIP decoder.
    """

    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim)
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)
    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1)
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
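
# A minimal shape-check sketch (hypothetical sizes, not from the original file):
#
#     model = UnCLIPTextProjModel(time_embed_dim=1536, cross_attention_dim=2048)
#     hidden, time_emb = model(
#         image_embeddings=torch.randn(2, 768),
#         prompt_embeds=torch.randn(2, 768),
#         text_encoder_hidden_states=torch.randn(2, 77, 2048),
#         do_classifier_free_guidance=False,
#     )
#     # hidden: (2, 4 + 77, 2048); time_emb: (2, 1536)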
| 86 |
"""simple docstring"""
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1):
    """Locate the leftmost insertion point for `item` in `sorted_collection`."""
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo
def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1):
    """Locate the rightmost insertion point for `item` in `sorted_collection`."""
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo
def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1):
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)
def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1):
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)
def binary_search(sorted_collection: list[int], item: int):
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def binary_search_std_lib(sorted_collection: list[int], item: int):
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None
def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int):
    if right < left:
        return None

    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
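
# Sanity sketch (not part of the original module): the hand-rolled bisect
# helpers should agree with the standard library on any sorted input.
#
#     data = [0, 5, 7, 10, 15]
#     assert bisect_left(data, 6) == bisect.bisect_left(data, 6)
#     assert bisect_right(data, 5) == bisect.bisect_right(data, 5)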
if __name__ == "__main__":
    user_input = input('Enter numbers separated by comma:\n').strip()
    collection = sorted(int(item) for item in user_input.split(','))
    target = int(input('Enter a single number to be found in the list:\n'))
    result = binary_search(collection, target)
if result is None:
print(f"""{target} was not found in {collection}.""")
else:
print(f"""{target} was found at position {result} in {collection}.""")
| 86 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
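
# `enable_full_determinism()` forces deterministic torch/cuDNN kernels so the
# hard-coded image slices asserted in the tests below stay reproducible.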
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64, )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=32, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=False)

        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=False)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def test_save_load_optional_components(self):
        pass
    def test_stable_diffusion_xl_img2img_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_base(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
| 154 |
'''simple docstring'''
def partition(m: int) -> int:
    """Count the integer partitions of `m` with a memoization table."""
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]
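
# Worked example (traced by hand): partition(3) == 3, matching the three
# integer partitions of 3: {3}, {2, 1} and {1, 1, 1}.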
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
            n = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
            n = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
| 254 | 0 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)
    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)
    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)
    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))
    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
| 87 |
def palindromic_string(input_string: str) -> str:
    """Return the longest palindromic substring of `input_string` (Manacher's algorithm)."""
    max_length = 0

    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this string is ending after the previously explored end (that is r) ?
        # if yes the update the new r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
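
# Illustrative check (a classic Manacher example, not from the original file):
#
#     assert palindromic_string("forgeeksskeegfor") == "geeksskeeg"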
if __name__ == "__main__":
import doctest
doctest.testmod()
| 87 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.upsample.0""": """encoder.upsample.projection""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
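
# Keys are fairseq parameter-name fragments; values are the matching
# transformers module paths. A "*" is a wildcard that gets replaced with the
# encoder layer index at load time (see `recursively_load_weights` below).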
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """
    Copy/paste/tweak the fairseq SEW weights into the transformers design.
    """
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False, )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
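
# Example invocation (file name and paths are illustrative only, not from this repo):
#   python convert_sew_checkpoint.py \
#       --checkpoint_path ./sew.pt --pytorch_dump_folder_path ./sew-hf \
#       --dict_path ./dict.ltr.txt --is_finetuned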
| 52 |
"""simple docstring"""
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }

        config.update(**kwargs)
        return config
    def test_step_shape(self):
        num_inference_steps = 10

        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)

        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)

        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3
    def test_full_loop_with_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)

        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 15, 0]

        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 136 | 0 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    """
    The output of [`PriorTransformer`]: the predicted CLIP image embedding
    conditioned on the CLIP text embedding input.
    """

    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
@register_to_config
    def __init__(self, num_attention_heads: int = 32, attention_head_dim: int = 64, num_layers: int = 20, embedding_dim: int = 768, num_embeddings=77, additional_embeddings=4, dropout: float = 0.0, time_embed_act_fn: str = "silu", norm_in_type: Optional[str] = None, embedding_proj_norm_type: Optional[str] = None, encoder_hid_proj_type: Optional[str] = "linear", added_emb_type: Optional[str] = "prd", time_embed_dim: Optional[int] = None, embedding_proj_dim: Optional[int] = None, clip_embed_dim: Optional[int] = None, ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.")

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, activation_fn="gelu", attention_bias=True, )
                for d in range(num_layers)
            ])

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)

        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0)
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name, module, processors):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors
    def set_attn_processor(self, processor):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes.")

        def fn_recursive_attn_processor(name, module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    def forward(self, hidden_states, timestep, proj_embedding, encoder_hidden_states=None, attention_mask=None, return_dict: bool = True, ):
        batch_size = hidden_states.shape[0]

        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(
            additional_embeds, dim=1, )

        # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings, (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ), value=0.0, )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)
    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
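
# Rough usage sketch (shapes are illustrative; the prior maps CLIP text
# conditioning to a predicted CLIP image embedding):
#
#     prior = PriorTransformer()
#     out = prior(
#         hidden_states=torch.randn(2, 768),
#         timestep=10,
#         proj_embedding=torch.randn(2, 768),
#         encoder_hidden_states=torch.randn(2, 77, 768),
#     )
#     out.predicted_image_embedding  # (2, 768)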
| 368 |
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix):
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1]))
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            ))
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1]))
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0])))
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0]))
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1])))
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0]))
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0])))
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1]))
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0])))
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0]))

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
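
# Worked 2x2 example (hand-checked): det([[2, 5], [2, 0]]) = -10, so
#
#     inverse_of_matrix([[2, 5], [2, 0]]) == [[0.0, 0.5], [0.2, -0.2]]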
| 343 | 0 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    """Builds tiny OpenLlama configs and inputs for the model tests below."""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self : List[str] ) -> str:
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , use_stable_embedding=UpperCamelCase__ , )
def _lowercase ( self : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] ) -> str:
"""simple docstring"""
__magic_name__ = OpenLlamaModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
__magic_name__ = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , ) -> Optional[Any]:
"""simple docstring"""
__magic_name__ = True
__magic_name__ = OpenLlamaModel(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__magic_name__ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , )
__magic_name__ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , )
__magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple , ) -> Dict:
"""simple docstring"""
__magic_name__ = OpenLlamaForCausalLM(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] , ) -> Optional[Any]:
"""simple docstring"""
__magic_name__ = True
__magic_name__ = True
__magic_name__ = OpenLlamaForCausalLM(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
# first forward pass
__magic_name__ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , use_cache=UpperCamelCase__ , )
__magic_name__ = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids
__magic_name__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
__magic_name__ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
__magic_name__ = torch.cat([input_ids, next_tokens] , dim=-1 )
__magic_name__ = torch.cat([input_mask, next_mask] , dim=-1 )
__magic_name__ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )["""hidden_states"""][0]
__magic_name__ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )["""hidden_states"""][0]
# select random slice
__magic_name__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__magic_name__ = output_from_no_past[:, -3:, random_slice_idx].detach()
__magic_name__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-3 ) )
def _lowercase ( self : Optional[Any] ) -> int:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
a__ = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
a__ = (OpenLlamaForCausalLM,) if is_torch_available() else ()
a__ = (
{
"""feature-extraction""": OpenLlamaModel,
"""text-classification""": OpenLlamaForSequenceClassification,
"""text-generation""": OpenLlamaForCausalLM,
"""zero-shot""": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
a__ = False
a__ = False
def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
        self.model_tester = OpenLlamaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OpenLlamaConfig , hidden_size=37 )
def _lowercase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowercase ( self : str ) -> List[str]:
"""simple docstring"""
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def _lowercase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
def _lowercase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = 3
__magic_name__ = input_dict["""input_ids"""]
__magic_name__ = input_ids.ne(1 ).to(UpperCamelCase__ )
__magic_name__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__magic_name__ = OpenLlamaForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase ( self : List[str] ) -> Tuple:
"""simple docstring"""
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = 3
__magic_name__ = """single_label_classification"""
__magic_name__ = input_dict["""input_ids"""]
__magic_name__ = input_ids.ne(1 ).to(UpperCamelCase__ )
__magic_name__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__magic_name__ = OpenLlamaForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase ( self : str ) -> Dict:
"""simple docstring"""
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = 3
__magic_name__ = """multi_label_classification"""
__magic_name__ = input_dict["""input_ids"""]
__magic_name__ = input_ids.ne(1 ).to(UpperCamelCase__ )
__magic_name__ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__magic_name__ = OpenLlamaForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
def _lowercase ( self : str ) -> int:
"""simple docstring"""
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
    def _lowercase ( self : Tuple , scaling_type : List[Any] ) -> str:
        """simple docstring"""
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10] , config.vocab_size )
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config )
        original_model.to(torch_device )
        original_model.eval()
        original_short_output = original_model(short_input ).last_hidden_state
        original_long_output = original_model(long_input ).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"""type""": scaling_type, """factor""": 10.0}
        scaled_model = OpenLlamaModel(config )
        scaled_model.to(torch_device )
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input ).last_hidden_state
        scaled_long_output = scaled_model(long_input ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output , scaled_short_output , atol=1E-5 ) )
        else:
            self.assertFalse(torch.allclose(original_short_output , scaled_short_output , atol=1E-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output , scaled_long_output , atol=1E-5 ) )
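
# Hedged aside (editorial sketch, not part of the test suite above; names are
# illustrative): linear RoPE scaling divides each position index by `factor`
# before computing the rotary angles, which is why the short-input outputs above
# differ under "linear" scaling but match under "dynamic" scaling.
def _rope_angles_sketch(seq_len, dim, base=10000.0, factor=1.0):
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))  # (dim // 2,)
    positions = torch.arange(seq_len).float() / factor  # linear position scaling
    return torch.outer(positions, inv_freq)  # (seq_len, dim // 2) rotary angles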
| 88 | '''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def lowerCAmelCase_ ( model_a : Dict , model_b : Union[str, Any] , did_step : Optional[Any] , iteration : List[Any] ) -> List[Any]:
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def lowerCAmelCase_ ( model : Any , input : Tuple , target : Any , accelerator : List[Any] , do_backward : str=True ) -> Optional[Any]:
    '''simple docstring'''
    model.train()
    output = model(input )
    loss = F.mse_loss(output , target.to(output.device ) )
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss )
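# Editorial note: on the manual path above the loss is pre-divided by
# `gradient_accumulation_steps` so that summing per-micro-batch gradients matches
# one large-batch step; `accelerator.backward` applies the same scaling internally
# when gradient accumulation is enabled.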
def lowerCAmelCase_ ( accelerator : Optional[Any] , sched : Any=False ) -> Dict:
'''simple docstring'''
set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=80 )
    dataloader = DataLoader(dset , batch_size=16 )
    model.to(accelerator.device )
    if sched:
        opt = AdamW(params=model.parameters() , lr=1E-3 )
        ddp_opt = AdamW(params=ddp_model.parameters() , lr=1E-3 )
        sched = LambdaLR(opt , lr_lambda=lambda epoch : epoch**0.65 )
        ddp_sched = LambdaLR(ddp_opt , lr_lambda=lambda epoch : epoch**0.65 )
    # Make a copy of `model`
    if sched:
        ddp_model , ddp_opt , ddp_sched , dataloader = accelerator.prepare(ddp_model , ddp_opt , ddp_sched , dataloader )
    else:
        ddp_model , dataloader = accelerator.prepare(ddp_model , dataloader )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def lowerCAmelCase_ ( accelerator : Any ) -> int:
'''simple docstring'''
    model , ddp_model , dataloader = get_training_setup(accelerator )
# Use a single batch
    ddp_input , ddp_target = next(iter(dataloader ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(snake_case_ ):
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
else:
# Sync grads
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
def lowerCAmelCase_ ( accelerator : Tuple ) -> str:
'''simple docstring'''
    model , ddp_model , dataloader = get_training_setup(accelerator )
# Use a single batch
    ddp_input , ddp_target = next(iter(dataloader ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(snake_case_ ):
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
else:
# Sync grads
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
def lowerCAmelCase_ ( split_batches : Optional[int]=False , dispatch_batches : str=False ) -> List[str]:
    '''simple docstring'''
    accelerator = Accelerator(
        split_batches=split_batches , dispatch_batches=dispatch_batches , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
    model , ddp_model , dataloader = get_training_setup(accelerator )
for iteration, batch in enumerate(snake_case_ ):
        ddp_input , ddp_target = batch.values()
# Gather the distributed inputs and targs for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(snake_case_ ):
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(snake_case_ ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
GradientState._reset_state()
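# Hedged sketch of the pattern exercised above (illustrative, not part of the test):
#
#     accelerator = Accelerator(gradient_accumulation_steps=2)
#     model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#     for batch, target in dataloader:
#         with accelerator.accumulate(model):  # skips gradient sync until the step boundary
#             loss = F.mse_loss(model(batch), target)
#             accelerator.backward(loss)
#             optimizer.step()
#             optimizer.zero_grad()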
def lowerCAmelCase_ ( split_batches : Optional[Any]=False , dispatch_batches : Tuple=False ) -> Union[str, Any]:
    '''simple docstring'''
    accelerator = Accelerator(
        split_batches=split_batches , dispatch_batches=dispatch_batches , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
    model , opt , sched , dataloader , ddp_model , ddp_opt , ddp_sched = get_training_setup(accelerator , sched=True )
for iteration, batch in enumerate(snake_case_ ):
        ddp_input , ddp_target = batch.values()
# Gather the distributed inputs and targs for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(snake_case_ )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(snake_case_ ):
step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n"""
UpperCAmelCase_ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(snake_case_ ))
if accelerator.num_processes > 1:
check_model_parameters(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
GradientState._reset_state()
def lowerCAmelCase_ ( ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = Accelerator()
UpperCAmelCase_ = RegressionDataset(length=80 )
UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 )
UpperCAmelCase_ = RegressionDataset(length=96 )
UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 )
UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(snake_case_ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case_ )
if iteration < len(snake_case_ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(snake_case_ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case_ )
if batch_num < len(snake_case_ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def lowerCAmelCase_ ( ) -> str:
'''simple docstring'''
    accelerator = Accelerator()
    state = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**" )
        test_noop_sync(accelerator )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**" )
        test_distributed_sync(accelerator )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
                test_gradient_accumulation(split_batch , dispatch_batches )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
                    test_gradient_accumulation_with_opt_and_scheduler(split_batch , dispatch_batches )
def lowerCAmelCase_ ( snake_case_ : Dict ) -> int:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 1 | 0 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
super().tearDown()
gc.collect()
def lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
        sd_pipe , params = FlaxStableDiffusionPipeline.from_pretrained(
            """stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloat16 , )
__lowercase : Union[str, Any] = """A painting of a squirrel eating a burger"""
__lowercase : Dict = jax.device_count()
__lowercase : int = num_samples * [prompt]
__lowercase : List[str] = sd_pipe.prepare_inputs(_lowerCAmelCase )
__lowercase : Union[str, Any] = replicate(_lowerCAmelCase )
__lowercase : Optional[Any] = shard(_lowerCAmelCase )
__lowercase : List[str] = jax.random.PRNGKey(0 )
__lowercase : int = jax.random.split(_lowerCAmelCase , jax.device_count() )
__lowercase : Optional[Any] = sd_pipe(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , num_inference_steps=25 , jit=_lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
__lowercase : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__lowercase : Optional[int] = images[0, 253:256, 253:256, -1]
__lowercase : Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__lowercase : Optional[Any] = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512] )
print(F"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
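    # Hedged aside (editorial): `replicate` copies the parameter pytree onto every
    # device, while `shard` reshapes a host batch of shape
    # (jax.device_count() * per_device_batch, ...) into
    # (jax.device_count(), per_device_batch, ...) so each device receives one slice.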
def lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase : Tuple = """stabilityai/stable-diffusion-2"""
        scheduler , scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
        sd_pipe , params = FlaxStableDiffusionPipeline.from_pretrained(
            _lowerCAmelCase , scheduler=scheduler , revision="""bf16""" , dtype=jnp.bfloat16 , )
        params["""scheduler"""] = scheduler_params
__lowercase : List[str] = """A painting of a squirrel eating a burger"""
__lowercase : Dict = jax.device_count()
__lowercase : Union[str, Any] = num_samples * [prompt]
__lowercase : Any = sd_pipe.prepare_inputs(_lowerCAmelCase )
__lowercase : List[str] = replicate(_lowerCAmelCase )
__lowercase : Dict = shard(_lowerCAmelCase )
__lowercase : Optional[int] = jax.random.PRNGKey(0 )
__lowercase : List[str] = jax.random.split(_lowerCAmelCase , jax.device_count() )
__lowercase : Tuple = sd_pipe(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , num_inference_steps=25 , jit=_lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
__lowercase : List[Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__lowercase : Optional[Any] = images[0, 253:256, 253:256, -1]
__lowercase : Any = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__lowercase : Dict = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] )
print(F"output_slice: {output_slice}" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 370
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
lowerCamelCase : str = trt.Logger(trt.Logger.WARNING)
lowerCamelCase : Any = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
lowerCamelCase : Optional[Any] = logging.getLogger(__name__)
lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=3_84,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=1_28,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
    '''--preprocessing_num_workers''', type=int, default=4, help='''The number of processes to use for preprocessing.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
lowerCamelCase : Dict = parser.parse_args()
if args.tokenizer_name:
lowerCamelCase : str = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
)
logger.info('''Training/evaluation parameters %s''', args)
lowerCamelCase : List[str] = args.per_device_eval_batch_size
lowerCamelCase : Any = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
lowerCamelCase : List[str] = True
lowerCamelCase : List[Any] = '''temp_engine/bert-fp32.engine'''
if args.fp16:
    lowerCamelCase : Optional[Any] = '''temp_engine/bert-fp16.engine'''
if args.int8:
    lowerCamelCase : int = '''temp_engine/bert-int8.engine'''
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
lowerCamelCase : int = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
lowerCamelCase : Union[str, Any] = [network.get_input(i) for i in range(network.num_inputs)]
lowerCamelCase : Dict = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
lowerCamelCase : List[str] = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
lowerCamelCase : Optional[int] = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
lowerCamelCase : Optional[Any] = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
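# Editorial note (hedged): a serialized TensorRT engine is specific to the GPU and
# TensorRT version it was built with, so the .engine file above generally cannot be
# copied between machines; rebuild it from the ONNX model instead.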
def model_infer( inputs , context , d_inputs , h_output0 , h_output1 , d_output0 , d_output1 , stream ):
    input_ids = np.asarray(inputs["""input_ids"""] , dtype=np.int32 )
    attention_mask = np.asarray(inputs["""attention_mask"""] , dtype=np.int32 )
    token_type_ids = np.asarray(inputs["""token_type_ids"""] , dtype=np.int32 )
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , stream )
    cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , stream )
    cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , stream )
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp ) for d_inp in d_inputs] + [int(d_output0 ), int(d_output1 )] , stream_handle=stream.handle )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0 , d_output0 , stream )
    cuda.memcpy_dtoh_async(h_output1 , d_output1 , stream )
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
lowerCamelCase : Tuple = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCamelCase : List[Any] = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
lowerCamelCase : Optional[Any] = raw_datasets['''validation'''].column_names
lowerCamelCase : Union[str, Any] = '''question''' if '''question''' in column_names else column_names[0]
lowerCamelCase : str = '''context''' if '''context''' in column_names else column_names[1]
lowerCamelCase : Dict = '''answers''' if '''answers''' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
lowerCamelCase : Dict = tokenizer.padding_side == '''right'''
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
lowerCamelCase : Tuple = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features( examples ):
# Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
# left whitespace
__lowercase : str = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
__lowercase : List[str] = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=lowerCAmelCase_ , stride=args.doc_stride , return_overflowing_tokens=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , padding="""max_length""" , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
__lowercase : List[str] = tokenized_examples.pop("""overflow_to_sample_mapping""" )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
__lowercase : Any = []
for i in range(len(tokenized_examples["""input_ids"""] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
__lowercase : Dict = tokenized_examples.sequence_ids(lowerCAmelCase_ )
__lowercase : List[Any] = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
__lowercase : List[str] = sample_mapping[i]
tokenized_examples["example_id"].append(examples["""id"""][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
__lowercase : Dict = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] )
]
return tokenized_examples
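# Editorial illustration (hypothetical values): with return_offsets_mapping=True,
# each kept pair such as (15, 19) records the character span of one context token,
# which is exactly what the post-processing below needs to map predicted start/end
# token positions back to an answer substring of the original context.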
lowerCamelCase : Tuple = raw_datasets['''validation''']
# Validation Feature Creation
lowerCamelCase : Optional[int] = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='''Running tokenizer on validation dataset''',
)
lowerCamelCase : Union[str, Any] = default_data_collator
lowerCamelCase : Optional[Any] = eval_dataset.remove_columns(['''example_id''', '''offset_mapping'''])
lowerCamelCase : List[str] = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function( examples , features , predictions , stage="eval" ):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples , features=features , predictions=predictions , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=stage , )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()]
    references = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions , label_ids=references )
lowerCamelCase : Dict = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''')
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes( binding ):
        return trt.volume(engine.get_binding_shape(binding ) ) * engine.get_binding_dtype(binding ).itemsize
# Allocate device memory for inputs and outputs.
lowerCamelCase : int = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
lowerCamelCase : Optional[int] = cuda.Stream()
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(f''' Num examples = {len(eval_dataset)}''')
logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
lowerCamelCase : int = 0.0
lowerCamelCase : List[str] = 0
lowerCamelCase : List[str] = timeit.default_timer()
lowerCamelCase : List[Any] = None
for step, batch in enumerate(eval_dataloader):
        outputs , infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
total_time += infer_time
niter += 1
        start_logits , end_logits = outputs
lowerCamelCase : Optional[Any] = torch.tensor(start_logits)
lowerCamelCase : List[str] = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
lowerCamelCase : Optional[int] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-1_00)
lowerCamelCase : Dict = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-1_00)
lowerCamelCase : List[Any] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
lowerCamelCase : Dict = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-1_00)
if all_preds is not None:
lowerCamelCase : Tuple = nested_truncate(all_preds, len(eval_dataset))
lowerCamelCase : Dict = timeit.default_timer() - start_time
logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 10_00 / niter))
logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 10_00))
logger.info('''Total Number of Inference = %d''', niter)
lowerCamelCase : str = post_processing_function(eval_examples, eval_dataset, all_preds)
lowerCamelCase : Optional[Any] = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f'''Evaluation metrics: {eval_metric}''')
| 306 | 0 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
A : Tuple = logging.get_logger(__name__)
A : Dict = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
'''constant''': get_constant_schedule,
'''constant_w_warmup''': get_constant_schedule_with_warmup,
}
class A (Trainer ):
'''simple docstring'''
def __init__( self : str , __lowerCAmelCase : str=None , __lowerCAmelCase : str=None , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : Optional[int] ) -> Tuple:
"""simple docstring"""
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
if config is None:
            assert isinstance(self.model , PreTrainedModel ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f' {self.model.__class__}'
)
A__ = self.model.config
else:
A__ = config
A__ = data_args
        A__ = self.config.tgt_vocab_size if isinstance(self.config , FSMTConfig ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f'The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'
""" padding..""" )
if self.args.label_smoothing == 0:
A__ = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
A__ = label_smoothed_nll_loss
def a_ ( self : Optional[int] , __lowerCAmelCase : int ) -> List[Any]:
"""simple docstring"""
if self.optimizer is None:
A__ = ["""bias""", """LayerNorm.weight"""]
A__ = [
{
"""params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"""weight_decay""": self.args.weight_decay,
},
{
"""params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
A__ = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
A__ = Adafactor
A__ = {"""scale_parameter""": False, """relative_step""": False}
else:
A__ = AdamW
A__ = {
"""betas""": (self.args.adam_betaa, self.args.adam_betaa),
"""eps""": self.args.adam_epsilon,
}
A__ = self.args.learning_rate
if self.sharded_ddp:
A__ = OSS(
params=__lowerCAmelCase , optim=__lowerCAmelCase , **__lowerCAmelCase , )
else:
A__ = optimizer_cls(__lowerCAmelCase , **__lowerCAmelCase )
if self.lr_scheduler is None:
A__ = self._get_lr_scheduler(__lowerCAmelCase )
else: # ignoring --lr_scheduler
logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
def a_ ( self : Dict , __lowerCAmelCase : str ) -> Tuple:
"""simple docstring"""
A__ = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
A__ = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
A__ = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
A__ = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=__lowerCAmelCase )
return scheduler
def a_ ( self : int ) -> Optional[torch.utils.data.Sampler]:
"""simple docstring"""
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def a_ ( self : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
A__ = model(**__lowerCAmelCase , use_cache=__lowerCAmelCase )[0]
A__ = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
A__ , A__ = model(**__lowerCAmelCase , labels=__lowerCAmelCase , use_cache=__lowerCAmelCase )[:2]
else:
# compute label smoothed loss
A__ = model(**__lowerCAmelCase , use_cache=__lowerCAmelCase )[0]
A__ = torch.nn.functional.log_softmax(__lowerCAmelCase , dim=-1 )
A__ , A__ = self.loss_fn(__lowerCAmelCase , __lowerCAmelCase , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def a_ ( self : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str ) -> List[Any]:
"""simple docstring"""
A__ = inputs.pop("""labels""" )
A__ , A__ = self._compute_loss(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return loss
def a_ ( self : int , __lowerCAmelCase : nn.Module , __lowerCAmelCase : Dict[str, Union[torch.Tensor, Any]] , __lowerCAmelCase : bool , __lowerCAmelCase : Optional[List[str]] = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
"""simple docstring"""
A__ = self._prepare_inputs(__lowerCAmelCase )
A__ = {
"""max_length""": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"""num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
A__ = self.model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **__lowerCAmelCase , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
A__ = self._pad_tensors_to_max_len(__lowerCAmelCase , gen_kwargs["""max_length"""] )
A__ = inputs.pop("""labels""" )
with torch.no_grad():
# compute loss on predict data
A__ , A__ = self._compute_loss(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
A__ = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
A__ = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
A__ = self._pad_tensors_to_max_len(__lowerCAmelCase , gen_kwargs["""max_length"""] )
return (loss, logits, labels)
def a_ ( self : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] ) -> List[str]:
"""simple docstring"""
A__ = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"""Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
f' padded to `max_length`={max_length}' )
A__ = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
A__ = tensor
return padded_tensor
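
# Hedged sketch of the `label_smoothed_nll_loss` imported above (fairseq-style;
# the name, signature, and placement are illustrative, not the project's actual helper):
def _label_smoothed_nll_sketch(lprobs , target , epsilon , ignore_index ):
    target = target.unsqueeze(-1 )
    nll_loss = -lprobs.gather(dim=-1 , index=target )   # gold-token term
    smooth_loss = -lprobs.sum(dim=-1 , keepdim=True )   # uniform term over the vocab
    pad_mask = target.eq(ignore_index )
    nll_loss = nll_loss.masked_fill(pad_mask , 0.0 )
    smooth_loss = smooth_loss.masked_fill(pad_mask , 0.0 )
    eps_i = epsilon / lprobs.size(-1 )
    return (1.0 - epsilon) * nll_loss.sum() + eps_i * smooth_loss.sum()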
| 274 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def __lowerCamelCase ( __a :List[str] , __a :List[Any] , __a :Union[str, Any] , __a :List[Any] ) -> Dict:
"""simple docstring"""
A__ = multiprocessing.Manager()
A__ = manager.list()
A__ = multiprocessing.Process(target=__a , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append("""timed out""" )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def __lowerCamelCase ( __a :Optional[Any] , __a :Any , __a :List[Any] ) -> Union[str, Any]:
"""simple docstring"""
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
A__ = shutil.rmtree
A__ = os.rmdir
A__ = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
A__ = {}
with swallow_io():
with time_limit(__a ):
exec(__a , __a )
result.append("""passed""" )
except TimeoutException:
result.append("""timed out""" )
except BaseException as e:
result.append(F'failed: {e}' )
# Needed for cleaning up.
A__ = rmtree
A__ = rmdir
A__ = chdir
@contextlib.contextmanager
def __lowerCamelCase ( __a :List[str] ) -> Dict:
"""simple docstring"""
def signal_handler(__a :List[Any] , __a :Optional[Any] ):
raise TimeoutException("""Timed out!""" )
signal.setitimer(signal.ITIMER_REAL , __a )
signal.signal(signal.SIGALRM , __a )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
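# Hedged usage sketch (the helper above is presumably the original `time_limit`):
#
#     with time_limit(1.0):
#         while True:   # interrupted by SIGALRM after ~1 second -> TimeoutException
#             pass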
@contextlib.contextmanager
def __lowerCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
A__ = WriteOnlyStringIO()
with contextlib.redirect_stdout(__a ):
with contextlib.redirect_stderr(__a ):
with redirect_stdin(__a ):
yield
@contextlib.contextmanager
def __lowerCamelCase ( ) -> Dict:
"""simple docstring"""
with tempfile.TemporaryDirectory() as dirname:
with chdir(__a ):
yield dirname
class TimeoutException (Exception ):
    '''simple docstring'''
    pass
class WriteOnlyStringIO (io.StringIO ):
'''simple docstring'''
def a_ ( self : Any , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : str ) -> Dict:
"""simple docstring"""
raise OSError
def a_ ( self : Optional[Any] , *__lowerCAmelCase : Any , **__lowerCAmelCase : Optional[int] ) -> str:
"""simple docstring"""
raise OSError
def a_ ( self : Optional[Any] , *__lowerCAmelCase : Any , **__lowerCAmelCase : Any ) -> int:
"""simple docstring"""
raise OSError
def a_ ( self : str , *__lowerCAmelCase : Any , **__lowerCAmelCase : Union[str, Any] ) -> int:
"""simple docstring"""
return False
class redirect_stdin (contextlib._RedirectStream ):  # type: ignore
    '''simple docstring'''
    _stream = '''stdin'''
@contextlib.contextmanager
def __lowerCamelCase ( __a :Union[str, Any] ) -> List[str]:
"""simple docstring"""
if root == ".":
yield
return
A__ = os.getcwd()
os.chdir(__a )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(__a )
def __lowerCamelCase ( __a :Union[str, Any]=None ) -> Dict:
"""simple docstring"""
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
A__ = None
A__ = None
import os
A__ = """1"""
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
import shutil
A__ = None
A__ = None
A__ = None
import subprocess
A__ = None # type: ignore
A__ = None
import sys
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
| 274 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
    MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
    MTaTokenizerFast = TaTokenizerFast
_import_structure = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mt5'] = [
'MT5EncoderModel',
'MT5ForConditionalGeneration',
'MT5ForQuestionAnswering',
'MT5Model',
'MT5PreTrainedModel',
'MT5Stack',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mt5'] = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_mt5'] = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
    sys.modules[__name__] = _LazyModule(
__name__,
globals()['__file__'],
_import_structure,
extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast},
module_spec=__spec__,
)
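
# --- note (added): how the lazy module behaves ---
# With `sys.modules[__name__]` replaced by the `_LazyModule`, importing this package
# stays cheap: a submodule listed in `_import_structure` is only imported on first
# attribute access (e.g. `from transformers import MT5Model` resolves `modeling_mt5`
# at that moment), while the tokenizer aliases are exposed eagerly via `extra_objects`.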
| 251 |
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    def __init__(self, args):
        '''simple docstring'''
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        '''simple docstring'''
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        '''simple docstring'''
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms
def __len__( self )-> Union[str, Any]:
'''simple docstring'''
return len(self.data )
    def __getitem__(self, index):
        '''simple docstring'''
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]['text'], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]
        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]['label']]] = 1
        image = Image.open(os.path.join(self.data_dir, self.data[index]['img'])).convert('RGB')
        image = self.transforms(image)
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def UpperCAmelCase_ ( self )-> List[Any]:
'''simple docstring'''
        label_freqs = Counter()
for row in self.data:
label_freqs.update(row['label'] )
return label_freqs
def collate_fn(batch):
    lens = [len(row['sentence']) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row['sentence']
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row['image'] for row in batch])
    tgt_tensor = torch.stack([row['label'] for row in batch])
    img_start_token = torch.stack([row['image_start_token'] for row in batch])
    img_end_token = torch.stack([row['image_end_token'] for row in batch])
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
return transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ),
])
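

# --- usage sketch (added for illustration; the jsonl path and tokenizer are placeholders) ---
# from torch.utils.data import DataLoader
# from transformers import AutoTokenizer
#
# tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# dataset = JsonlDataset("train.jsonl", tokenizer, get_image_transforms(), get_mmimdb_labels(), max_seq_length=128)
# loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)
# text, mask, img, img_start, img_end, tgt = next(iter(loader))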
| 251 | 1 |
'''simple docstring'''
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
    def __init__(self):
        """simple docstring"""
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        """simple docstring"""
        return self.linear2(self.batchnorm(self.linear1(x)))
class lowerCAmelCase__ ( unittest.TestCase ):
    def test_offload_state_dict(self):
        """simple docstring"""
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, "index.json")
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index

            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, f"{key}.dat")
                self.assertTrue(os.path.isfile(weight_file))
                # TODO: add tests on the fact weights are properly loaded
    def test_offload_weight(self):
        """simple docstring"""
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, "weight", tmp_dir, {})
                weight_file = os.path.join(tmp_dir, "weight.dat")
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {"weight": {"shape": [2, 3], "dtype": str(dtype).split(".")[1]}})
                new_weight = load_offloaded_weight(weight_file, index["weight"])
                self.assertTrue(torch.equal(weight, new_weight))
    def test_offload_weights_loader(self):
        """simple docstring"""
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k}
        disk_part = {k: v for k, v in state_dict.items() if "linear2" in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        cpu_part = {k: v for k, v in state_dict.items() if "weight" in k}
        disk_part = {k: v for k, v in state_dict.items() if "weight" not in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))
    def test_extract_submodules_state_dict(self):
        """simple docstring"""
        state_dict = {"a.1": 0, "a.10": 1, "a.2": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1": 0, "a.2": 2})

        state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1.a": 0, "a.2.a": 2})
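

# --- usage sketch outside the tests (added; the directory name is a placeholder) ---
# offload_state_dict("offload_dir", model.state_dict())   # one .dat per tensor + index.json
# weights = OffloadedWeightsLoader(save_folder="offload_dir")
# tensor = weights["linear1.weight"]                      # lazily reloaded from disk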
| 93 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    '''simple docstring'''

    def __init__(self, data):
        self.data = data
        self.next: Node | None = None
class CircularLinkedList:
    '''simple docstring'''

    def __init__(self):
        self.head = None
        self.tail = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data):
        self.insert_nth(len(self), data)

    def insert_head(self, data):
        self.insert_nth(0, data)

    def insert_nth(self, index, data):
        if index < 0 or index > len(self):
            raise IndexError('list index out of range.')
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self):
        return self.delete_nth(0)

    def delete_tail(self):
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index=0):
        if not 0 <= index < len(self):
            raise IndexError('list index out of range.')
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self):
        return len(self) == 0
def test_circular_linked_list() -> None:
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 96 | 0 |
"""simple docstring"""
def molarity_to_normality(nfactor, moles, volume):
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume, moles, temperature):
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure, moles, temperature):
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure, moles, volume):
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
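
# --- worked examples (added; values chosen for round numbers) ---
# moles_to_pressure(volume=0.82, moles=3, temperature=300)
#   = round(3 * 0.0821 * 300 / 0.82) = round(90.1...) = 90
# molarity_to_normality(nfactor=2, moles=4, volume=8)
#   = round((4 / 8) * 2) = 1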
| 365 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class a_ ( ABC ):
    '''simple docstring'''

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        '''simple docstring'''
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        '''simple docstring'''
        raise NotImplementedError()
| 316 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 261 |
"""simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
)
_overwrite_items = [
_set("""key_a""", """val_a"""),
_set("""key_a""", """val_b"""),
]
_delete_items = [
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
_del("""key_a"""),
_del("""key_b"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
]
_access_absent_items = [
_get("""key_a"""),
_del("""key_a"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
_del("""key_a"""),
_get("""key_a"""),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("""key_a""", """val_b"""),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my_exc) == str(py_exc)
        assert set(py) == set(my)
        assert len(py) == len(my)
        assert set(my.items()) == set(py.items())
def test_no_new_methods_implemented():
    def is_public(name) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
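

# --- note (added): the differential-testing idea above in one line ---
# Each parametrized case replays the same (operation, *args) script against both
# the custom HashMap and a plain dict, then asserts that the observable state
# (result or exception, keys, length, items) is identical. For example:
#   _run_operation(HashMap(initial_block_size=4), *_set("k", "v"))  # -> (None, None)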
| 261 | 1 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowerCAmelCase__ : int =logging.get_logger(__name__)
def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple):
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
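

# --- worked example (added): how `ensure_multiple_of` snaps the output size ---
# For a 480x640 input, target 384x384, keep_aspect_ratio=False, multiple=32:
#   scale_height = 384/480 = 0.8 -> 0.8 * 480 = 384, already a multiple of 32
#   scale_width  = 384/640 = 0.6 -> 0.6 * 640 = 384
# so get_resize_output_image_size(img, 384, False, 32) == (384, 384).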
class DPTImageProcessor(BaseImageProcessor):
    '''simple docstring'''

    model_input_names = ['pixel_values']
    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        keep_aspect_ratio=False,
        ensure_multiple_of=1,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs)
        size = size if size is not None else {'height': 384, 'width': 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image,
        size,
        keep_aspect_ratio=False,
        ensure_multiple_of=1,
        resample=PILImageResampling.BICUBIC,
        data_format=None,
        **kwargs,
    ):
        '''simple docstring'''
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""")
        output_size = get_resize_output_image_size(
            image, output_size=(size['height'], size['width']), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of, )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(self, image, scale, data_format=None, **kwargs):
        '''simple docstring'''
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        '''simple docstring'''
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        keep_aspect_ratio=None,
        ensure_multiple_of=None,
        resample=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        '''simple docstring'''
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    'Make sure that you pass in as many target sizes as the batch dimension of the logits' )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
| 350 |
import os
def __lowercase ( a__ = "input.txt" ) -> int:
with open(os.path.join(os.path.dirname(a__ ) , a__ ) ) as input_file:
__SCREAMING_SNAKE_CASE = [
[int(a__ ) for element in line.split(',' )]
for line in input_file.readlines()
]
__SCREAMING_SNAKE_CASE = len(a__ )
__SCREAMING_SNAKE_CASE = len(matrix[0] )
__SCREAMING_SNAKE_CASE = [[-1 for _ in range(a__ )] for _ in range(a__ )]
for i in range(a__ ):
__SCREAMING_SNAKE_CASE = matrix[i][0]
for j in range(1 , a__ ):
for i in range(a__ ):
__SCREAMING_SNAKE_CASE = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , a__ ):
__SCREAMING_SNAKE_CASE = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
__SCREAMING_SNAKE_CASE = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(F'''{solution() = }''')
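
# --- tiny worked example (added): the three relaxation passes on [[1, 2], [3, 4]] ---
# Column j=1: move right gives 1+2=3 and 3+4=7; the downward pass keeps 7 (3+4);
# the upward pass keeps 3 (min(3, 7+2)). Minimum over the last column = 3, i.e. the
# path 1 -> 2. (Movement is right, up and down only, as in Project Euler 82.)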
| 118 | 0 |
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
"b0": efficientnet.EfficientNetBa,
"b1": efficientnet.EfficientNetBa,
"b2": efficientnet.EfficientNetBa,
"b3": efficientnet.EfficientNetBa,
"b4": efficientnet.EfficientNetBa,
"b5": efficientnet.EfficientNetBa,
"b6": efficientnet.EfficientNetBa,
"b7": efficientnet.EfficientNetBa,
}
CONFIG_MAP = {
"b0": {
"hidden_dim": 1280,
"width_coef": 1.0,
"depth_coef": 1.0,
"image_size": 224,
"dropout_rate": 0.2,
"dw_padding": [],
},
"b1": {
"hidden_dim": 1280,
"width_coef": 1.0,
"depth_coef": 1.1,
"image_size": 240,
"dropout_rate": 0.2,
"dw_padding": [16],
},
"b2": {
"hidden_dim": 1408,
"width_coef": 1.1,
"depth_coef": 1.2,
"image_size": 260,
"dropout_rate": 0.3,
"dw_padding": [5, 8, 16],
},
"b3": {
"hidden_dim": 1536,
"width_coef": 1.2,
"depth_coef": 1.4,
"image_size": 300,
"dropout_rate": 0.3,
"dw_padding": [5, 18],
},
"b4": {
"hidden_dim": 1792,
"width_coef": 1.4,
"depth_coef": 1.8,
"image_size": 380,
"dropout_rate": 0.4,
"dw_padding": [6],
},
"b5": {
"hidden_dim": 2048,
"width_coef": 1.6,
"depth_coef": 2.2,
"image_size": 456,
"dropout_rate": 0.4,
"dw_padding": [13, 27],
},
"b6": {
"hidden_dim": 2304,
"width_coef": 1.8,
"depth_coef": 2.6,
"image_size": 528,
"dropout_rate": 0.5,
"dw_padding": [31],
},
"b7": {
"hidden_dim": 2560,
"width_coef": 2.0,
"depth_coef": 3.1,
"image_size": 600,
"dropout_rate": 0.5,
"dw_padding": [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]['hidden_dim']
    config.width_coefficient = CONFIG_MAP[model_name]['width_coef']
    config.depth_coefficient = CONFIG_MAP[model_name]['depth_coef']
    config.image_size = CONFIG_MAP[model_name]['image_size']
    config.dropout_rate = CONFIG_MAP[model_name]['dropout_rate']
    config.depthwise_padding = CONFIG_MAP[model_name]['dw_padding']

    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    config.num_labels = 1_0_0_0
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]['image_size']
    preprocessor = EfficientNetImageProcessor(
        size={'height': size, 'width': size}, image_mean=[0.485, 0.456, 0.406], image_std=[0.47853944, 0.4732864, 0.47434163], do_center_crop=False, )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split('_')[0].split('block')[1] for v in original_param_names if v.startswith('block')]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((f"""block{b}_expand_conv/kernel:0""", f"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((f"""block{b}_expand_bn/gamma:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((f"""block{b}_expand_bn/beta:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(f"""block{b}_expand_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(f"""block{b}_expand_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(f"""block{b}_dwconv/depthwise_kernel:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((f"""block{b}_bn/gamma:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((f"""block{b}_bn/beta:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(f"""block{b}_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(f"""block{b}_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((f"""block{b}_se_reduce/kernel:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((f"""block{b}_se_reduce/bias:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((f"""block{b}_se_expand/kernel:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((f"""block{b}_se_expand/bias:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(f"""block{b}_project_conv/kernel:0""", f"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((f"""block{b}_project_bn/gamma:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((f"""block{b}_project_bn/beta:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(f"""block{b}_project_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(f"""block{b}_project_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = 'efficientnet.' + item[1]

    key_mapping['predictions/kernel:0'] = 'classifier.weight'
    key_mapping['predictions/bias:0'] = 'classifier.bias'
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    original_model = model_classes[model_name](
        include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1_0_0_0, classifier_activation='softmax', )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print('Converting parameters...')
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)
    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors='pt')

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()
    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]['image_size']
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1E-3), "The predicted logits are not the same."
print('''Model outputs match!''' )
    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"""Pushing converted {model_name} to the hub...""")
        model_name = f"""efficientnet-{model_name}"""
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
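
# --- example invocation (added; paths and flags are placeholders) ---
# python convert_efficientnet_to_pytorch.py --model_name b0 \
#     --pytorch_dump_folder_path hf_model --save_model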
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="b0",
type=str,
help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="hf_model",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--save_model", action="store_true", help="Save model to local")
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 275 |
def binary_or(a, b):
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive')
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int('1' in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
if __name__ == "__main__":
import doctest
doctest.testmod()
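
# --- worked example (added) ---
# binary_or(25, 32): 25 = 0b11001, 32 = 0b100000; zero-filled to width 6 and OR'd
# column-wise -> "111001", so the function returns "0b111001" (57 == 25 | 32).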
| 275 | 1 |
"""simple docstring"""
from __future__ import annotations
class Node:
    def __init__(self, data=None):
        '''simple docstring'''
        self.data = data
        self.next = None

    def __repr__(self):
        '''simple docstring'''
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f'''{temp.data}''')
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list):
    if not elements_list:
        raise Exception("The Elements List is empty")
    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node):
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)
if __name__ == "__main__":
main()
| 357 |
encode_dict = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word):
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded):
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
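
# --- worked example (added) ---
# encode("hello") == "AABBBAABAAABABAABABAABBAB"   # h=AABBB, e=AABAA, l=ABABA, o=ABBAB
# decode("AABBBAABAAABABAABABAABBAB") == "hello"   # 5-symbol groups; spaces split words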
| 180 | 0 |
'''simple docstring'''
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)
class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials, direction, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''')

    @classmethod
    def pip_install(cls):
        return f'''`pip install {cls.pip_package or cls.name}`'''
class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)
class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)
class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)
class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f'''{len(available_backends)} hyperparameter search backends available. Using {name} as the default.''')
        return name
raise RuntimeError(
"""No hyperparameter search backend available.\n"""
+ """\n""".join(
f''' - To install {backend.name} run {backend.pip_install()}'''
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
| 41 |
"""simple docstring"""
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    '''simple docstring'''
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix):
    '''simple docstring'''
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix):
    '''simple docstring'''
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix):
    '''simple docstring'''
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix):
    '''simple docstring'''
    matrix[:] = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix):
    '''simple docstring'''
    matrix[:] = matrix[::-1]
    return matrix


def reverse_column(matrix):
    '''simple docstring'''
    matrix[:] = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix):
    '''simple docstring'''
    for i in matrix:
        print(*i)
if __name__ == "__main__":
A : Dict = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))
A : List[str] = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))
A : str = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
| 57 | 0 |
import sys
from collections import defaultdict
class Heap:
    '''simple docstring'''

    def __init__(self):
        '''simple docstring'''
        self.node_position = []

    def get_position(self, vertex):
        '''simple docstring'''
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        '''simple docstring'''
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        '''simple docstring'''
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start]))
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        '''simple docstring'''
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        '''simple docstring'''
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        '''simple docstring'''
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges
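
# --- tiny worked example (added) ---
# adjacency_list = defaultdict(list)
# for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
#     adjacency_list[u].append([v, w]); adjacency_list[v].append([u, w])
# prisms_algorithm(adjacency_list) == [(0, 1), (1, 2)]   # total weight 3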
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
_lowerCamelCase : Any = int(input('''Enter number of edges: ''').strip())
_lowerCamelCase : Union[str, Any] = defaultdict(list)
for _ in range(edges_number):
_lowerCamelCase : str = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list)) | 130 |
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def _dump_articles(path: Path, articles: list):
    content = '\n'.join(articles)
    Path(path).open('w').writelines(content)
T5_TINY = '''patrickvonplaten/t5-tiny-random'''
BART_TINY = '''sshleifer/bart-tiny-random'''
MBART_TINY = '''sshleifer/tiny-mbart'''

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
    '''simple docstring'''

    def run_eval_tester(self, model):
        '''simple docstring'''
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / 'utest_input.source'
        output_file_name = input_file_name.parent / 'utest_output.txt'
        assert not output_file_name.exists()
        articles = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
        _dump_articles(input_file_name, articles)

        score_path = str(Path(self.get_auto_remove_tmp_dir()) / 'scores.json')
        task = 'translation_en_to_de' if model == T5_TINY else 'summarization'
        testargs = f'''
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        '''.split()

        with patch.object(sys, 'argv', testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))
    def test_run_eval(self):
        '''simple docstring'''
        self.run_eval_tester(T5_TINY)
    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        '''simple docstring'''
        self.run_eval_tester(model)
    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        '''simple docstring'''
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / 'utest_input.source'
        output_file_name = input_file_name.parent / 'utest_output.txt'
        assert not output_file_name.exists()

        text = {
            'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
            'de': [
                'Maschinelles Lernen ist großartig, oder?',
                'Ich esse gerne Bananen',
                'Morgen ist wieder ein toller Tag!',
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / 'scores.json')
        reference_path = str(tmp_dir / 'val.target')
        _dump_articles(input_file_name, text['en'])
        _dump_articles(reference_path, text['de'])
        task = 'translation_en_to_de' if model == T5_TINY else 'summarization'
        testargs = f'''
            run_eval_search.py
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        '''.split()
        testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'])

        with patch.object(sys, 'argv', testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [' num_beams | length_penalty', model, 'Best score args']
            un_expected_strings = ['Info']
            if "translation" in task:
                expected_strings.append('bleu')
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
| 130 | 1 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
_a = logging.get_logger(__name__) # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
"""simple docstring"""
    def __init__( self , speech_model , speech_processor , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor , ):
"""simple docstring"""
super().__init__()
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
        self.register_modules(
            speech_model=speech_model , speech_processor=speech_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , feature_extractor=feature_extractor , )
def UpperCamelCase ( self , UpperCAmelCase = "auto" ):
"""simple docstring"""
if slice_size == "auto":
_UpperCAmelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
self.enable_attention_slicing(UpperCAmelCase )
@torch.no_grad()
    def __call__( self , audio , sampling_rate=16_000 , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ):
"""simple docstring"""
        inputs = self.speech_processor.feature_extractor(
            audio , return_tensors='pt' , sampling_rate=sampling_rate ).input_features.to(self.device )
        predicted_ids = self.speech_model.generate(inputs , max_length=48_0000 )

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids , skip_special_tokens=True , normalize=True )[
            0
        ]

        if isinstance(prompt , str ):
            batch_size = 1
        elif isinstance(prompt , list ):
            batch_size = len(prompt )
        else:
            raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(prompt )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCAmelCase , UpperCAmelCase ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(UpperCAmelCase )}.""" )
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding='max_length', max_length=self.tokenizer.model_max_length, return_tensors='pt', )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                f" {self.tokenizer.model_max_length} tokens: {removed_text}")
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [''] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}.")
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    ' the batch size of `prompt`.')
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt', )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device='cpu', dtype=latents_dtype).to(
                    self.device)
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs['eta'] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        # scale and decode the image latents with the VAE
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
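
# Illustrative usage sketch (not part of the original file). The checkpoint ids
# below are assumptions; the loading pattern follows the diffusers community
# pipeline convention:
#
#     import torch
#     from diffusers import DiffusionPipeline
#     from transformers import WhisperForConditionalGeneration, WhisperProcessor
#
#     speech_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
#     speech_processor = WhisperProcessor.from_pretrained("openai/whisper-small")
#     pipe = DiffusionPipeline.from_pretrained(
#         "CompVis/stable-diffusion-v1-4",
#         custom_pipeline="speech_to_image_diffusion",
#         speech_model=speech_model,
#         speech_processor=speech_processor,
#         torch_dtype=torch.float16,
#     ).to("cuda")
#     image = pipe(audio_array, sampling_rate=16_000).images[0]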
| 39 |
"""simple docstring"""
def lowercase ( __snake_case : Optional[int] ):
lowercase_ : int = 0
lowercase_ : Optional[Any] = len(__snake_case )
for i in range(n - 1 ):
for j in range(i + 1 , __snake_case ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def count_inversions_recursive(arr):
    """Count inversions with a merge-sort style recursion: O(n log n)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    sorted_p, inversions_p = count_inversions_recursive(p)
    sorted_q, inversions_q = count_inversions_recursive(q)
    sorted_all, cross_inversions = _count_cross_inversions(sorted_p, sorted_q)
    num_inversions = inversions_p + inversions_q + cross_inversions
    return sorted_all, num_inversions
def _count_cross_inversions(p, q):
    """Merge the sorted lists p and q, counting pairs with p[i] > q[j]."""
    r = []
    i = j = num_inversion = 0

    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if p[i] > q[j], then p[k] > q[j] for all i < k <= len(p).
            # These are all inversions. The claim emerges from the
            # property that p is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion
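
# Worked example (illustrative): merging p = [1, 2, 10] and q = [2, 5, 11]
# counts two cross inversions. When q[0] = 2 and then q[1] = 5 are merged
# before p[2] = 10, the remaining len(p) - i = 1 element of p (the 10) is
# larger, so the pairs (10, 2) and (10, 5) are counted.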
def main():
    arr = [10, 2, 1, 5, 5, 2, 11]

    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr)
    _, num_inversions_recursive = count_inversions_recursive(arr)
    assert num_inversions_bf == num_inversions_recursive == 8
    print('number of inversions = ', num_inversions_bf)

    # testing an array with zero inversions (the sorted arr)
    arr.sort()
    num_inversions_bf = count_inversions_bf(arr)
    _, num_inversions_recursive = count_inversions_recursive(arr)
    assert num_inversions_bf == num_inversions_recursive == 0
    print('number of inversions = ', num_inversions_bf)

    # an empty list should also have zero inversions
    arr = []
    num_inversions_bf = count_inversions_bf(arr)
    _, num_inversions_recursive = count_inversions_recursive(arr)
    assert num_inversions_bf == num_inversions_recursive == 0
    print('number of inversions = ', num_inversions_bf)
if __name__ == "__main__":
main()
| 33 | 0 |
ROMAN = [
(1_000, """M"""),
(900, """CM"""),
(500, """D"""),
(400, """CD"""),
(100, """C"""),
(90, """XC"""),
(50, """L"""),
(40, """XL"""),
(10, """X"""),
(9, """IX"""),
(5, """V"""),
(4, """IV"""),
(1, """I"""),
]
def roman_to_int(roman: str) -> int:
    """Convert a Roman numeral string to an integer."""
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            # a smaller value before a larger one means subtraction, e.g. IV = 4
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total
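
# Example (illustrative): roman_to_int("MCMXCIV") == 1994, read as
# M (1000) + CM (900) + XC (90) + IV (4).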
def int_to_roman(number: int) -> str:
    """Convert an integer to a Roman numeral by greedy subtraction over ROMAN."""
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 319 |
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort that stops early once a pass makes no swaps."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
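
# Example (illustrative): bubble_sort([3, 1, 2]) returns [1, 2, 3]. Each
# recursive call scans one element less, because the largest unsorted
# element has already bubbled to the end during the previous pass.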
if __name__ == "__main__":
import doctest
doctest.testmod()
| 319 | 1 |
'''simple docstring'''
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_py3nvml_available():
    import py3nvml.py3nvml as nvml

logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.")
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
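
# Minimal usage sketch (illustrative, not part of the original module):
#
#     @run_with_tf_optimizations(do_eager_mode=False, use_xla=True)
#     def forward():
#         return model(input_ids, training=False)
#
# With eager mode disabled, `forward` is wrapped in `tf.function` (optionally
# XLA-compiled); with eager mode enabled, it runs as plain Python.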
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int):
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f'{model_class} does not exist. If you just want to test the pretrained model, you might want to'
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`.")
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f'{model_class} does not exist. If you just want to test the pretrained model, you might want to'
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`.")
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=False)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=False)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func, repeat=self.args.repeat, number=10, )
                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func: Callable[[], None]):
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used.")
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line.")
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`")
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU.")
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU.")
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow.")
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
| 22 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")

        # copy the label column's ClassLabel into a fresh template; the dataclass
        # is frozen, so the new schema is written through __dict__
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
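
# Illustrative usage (not part of the original file): aligning the template
# with a dataset's features replaces the placeholder ClassLabel with the
# concrete one carried by the dataset.
#
#     features = Features({"audio": Audio(), "labels": ClassLabel(names=["cat", "dog"])})
#     task = AudioClassification().align_with_features(features)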
| 155 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
"LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongT5EncoderModel",
"LongT5ForConditionalGeneration",
"LongT5Model",
"LongT5PreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
"FlaxLongT5ForConditionalGeneration",
"FlaxLongT5Model",
"FlaxLongT5PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 83 |
"""Prefix sums for O(1) range-sum queries over a static array."""


class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Return the sum of array[start..end] (inclusive) in O(1)."""
        if start == 0:
            return self.prefix_sum[end]

        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True if any contiguous subarray sums to target_sum."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True

            sums.add(sum_item)
        return False
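
# Illustrative usage (not part of the original file):
#
#     ps = PrefixSum([1, 2, 3, 4])        # prefix sums: [1, 3, 6, 10]
#     assert ps.get_sum(1, 3) == 9        # 2 + 3 + 4
#     assert ps.contains_sum(5) is True   # the subarray [2, 3]
#     assert ps.contains_sum(100) is False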
if __name__ == "__main__":
import doctest
doctest.testmod()
| 83 | 1 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """Compute WER/CER for the predictions and write the results to disk."""

    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    """Lowercase the text and strip the punctuation ignored during training."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
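
# Example (illustrative): normalize_text("Hello, World!\n\nGood day.")
# returns "hello world good day": punctuation in the ignore list is
# stripped, the text is lowercased, and newline runs collapse to spaces.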
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
    args = parser.parse_args()

    main(args)
| 212 |
def heaps(arr: list) -> list:
    """Return all permutations of arr, generated with Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res
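
# Worked example (illustrative): heaps([1, 2, 3]) returns all 3! = 6
# permutations as tuples; Heap's algorithm reaches each successive
# permutation from the previous one with a single swap.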
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
| 212 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, embed_dim=16, word_embed_proj_dim=16):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        config = self.config_cls(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim, word_embed_proj_dim=self.word_embed_proj_dim, is_encoder_decoder=False, **self.config_updates, )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size, hidden_size=24, num_hidden_layers=2, num_attention_heads=2, ffn_dim=32,
            max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]])
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ])
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids

            generated_ids = model.generate(input_ids, max_length=10)

            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_id = "facebook/opt-350m"

        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64))
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids

            generated_ids = model.generate(input_ids, max_length=10)

            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 249 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
    "tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
"ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaForCausalLM",
"RobertaForMaskedLM",
"RobertaForMultipleChoice",
"RobertaForQuestionAnswering",
"RobertaForSequenceClassification",
"RobertaForTokenClassification",
"RobertaModel",
"RobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
"TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaForCausalLM",
"TFRobertaForMaskedLM",
"TFRobertaForMultipleChoice",
"TFRobertaForQuestionAnswering",
"TFRobertaForSequenceClassification",
"TFRobertaForTokenClassification",
"TFRobertaMainLayer",
"TFRobertaModel",
"TFRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
"FlaxRobertaForCausalLM",
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 249 | 1 |
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x

    return (x, x)
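
# Illustrative examples (not part of the original file): to_atuple(7) == (7, 7),
# while to_atuple((224, 224)) is returned unchanged, so image and patch sizes
# may be given either as an int or as a pair.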
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass
def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: Union[str, Any] , UpperCamelCase: Dict , UpperCamelCase: Optional[int] , UpperCamelCase: Union[str, Any] , UpperCamelCase: List[Any]=None , **UpperCamelCase: Tuple ) -> List[Any]:
snake_case__ = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case__ = TFVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE )
snake_case__ = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) )
def lowerCAmelCase_ ( self: Dict , UpperCamelCase: Any , UpperCamelCase: Tuple , UpperCamelCase: Any , UpperCamelCase: List[Any] , UpperCamelCase: Optional[Any]=None , **UpperCamelCase: int ) -> int:
snake_case__ = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case__ = TFVisionTextDualEncoderModel(vision_model=_SCREAMING_SNAKE_CASE , text_model=_SCREAMING_SNAKE_CASE )
snake_case__ = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: Tuple , UpperCamelCase: List[str] , UpperCamelCase: int , UpperCamelCase: str , UpperCamelCase: List[str]=None , **UpperCamelCase: Any ) -> Optional[int]:
snake_case__ = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case__ = {'''vision_model''': vision_model, '''text_model''': text_model}
snake_case__ = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE )
snake_case__ = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def lowerCAmelCase_ ( self: Any , UpperCamelCase: str , UpperCamelCase: Dict , UpperCamelCase: str , UpperCamelCase: Union[str, Any] , UpperCamelCase: Optional[int]=None , **UpperCamelCase: Tuple ) -> Dict:
snake_case__ = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case__ = TFVisionTextDualEncoderModel(vision_model=_SCREAMING_SNAKE_CASE , text_model=_SCREAMING_SNAKE_CASE )
snake_case__ = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
snake_case__ = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_SCREAMING_SNAKE_CASE )
snake_case__ = TFVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case__ = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
snake_case__ = after_output[0].numpy()
snake_case__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-5 )
def lowerCAmelCase_ ( self: Any , UpperCamelCase: Dict , UpperCamelCase: Optional[int] , UpperCamelCase: Union[str, Any] , UpperCamelCase: int , UpperCamelCase: Tuple=None , **UpperCamelCase: Any ) -> str:
snake_case__ = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case__ = TFVisionTextDualEncoderModel(vision_model=_SCREAMING_SNAKE_CASE , text_model=_SCREAMING_SNAKE_CASE )
snake_case__ = model(
input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE )
snake_case__ = output.vision_model_output.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
snake_case__ = to_atuple(vision_model.config.image_size )
snake_case__ = to_atuple(vision_model.config.patch_size )
snake_case__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
snake_case__ = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
snake_case__ = output.text_model_output.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def lowerCAmelCase_ ( self: Union[str, Any] , UpperCamelCase: np.ndarray , UpperCamelCase: np.ndarray , UpperCamelCase: float ) -> Optional[Any]:
snake_case__ = np.abs((a - b) ).max()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , F'''Difference between torch and flax is {diff} (>= {tol}).''' )
def lowerCAmelCase_ ( self: List[Any] ) -> int:
snake_case__ = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**_SCREAMING_SNAKE_CASE )
def lowerCAmelCase_ ( self: List[str] ) -> int:
snake_case__ = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_SCREAMING_SNAKE_CASE )
def lowerCAmelCase_ ( self: List[Any] ) -> List[Any]:
snake_case__ = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_SCREAMING_SNAKE_CASE )
def lowerCAmelCase_ ( self: List[Any] ) -> int:
snake_case__ = self.prepare_config_and_inputs()
self.check_save_load(**_SCREAMING_SNAKE_CASE )
def lowerCAmelCase_ ( self: List[Any] ) -> Union[str, Any]:
snake_case__ = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_SCREAMING_SNAKE_CASE )
@slow
def lowerCAmelCase_ ( self: Tuple ) -> int:
snake_case__ = self.get_pretrained_model_and_inputs()
snake_case__ = model_a(**_SCREAMING_SNAKE_CASE )
snake_case__ = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_SCREAMING_SNAKE_CASE )
snake_case__ = TFVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE )
snake_case__ = model_a(**_SCREAMING_SNAKE_CASE )
snake_case__ = after_outputs[0].numpy()
snake_case__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-5 )
@require_tf
class __SCREAMING_SNAKE_CASE( _lowercase , unittest.TestCase ):
    def get_pretrained_model_and_inputs( self ):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-random-bert' )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs
    def get_vision_text_model( self , vision_config , text_config ):
        vision_model = TFViTModel(vision_config , name='vision_model' )
        text_model = TFBertModel(text_config , name='text_model' )
        return vision_model, text_model
    def prepare_config_and_inputs( self ):
        vit_model_tester = TFViTModelTester(self )
        bert_model_tester = TFBertModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values , _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __SCREAMING_SNAKE_CASE( _lowercase , unittest.TestCase ):
    def get_pretrained_model_and_inputs( self ):
        # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
        # just reinitialize it.
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            'Rocketknight1/tiny-random-deit-tf' , 'hf-internal-testing/tiny-random-roberta' )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs
    def check_vision_text_output_attention(
        self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        model = TFVisionTextDualEncoderModel(vision_model=vision_model , text_model=text_model )
        output = model(
            input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask , output_attentions=True )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions ) , vision_config.num_hidden_layers )
        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_atuple(vision_model.config.image_size )
        patch_size = to_atuple(vision_model.config.patch_size )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions ) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def get_vision_text_model( self , vision_config , text_config ):
        vision_model = TFDeiTModel(vision_config , name='vision_model' )
        text_model = TFRobertaModel(text_config , name='text_model' )
        return vision_model, text_model
    def prepare_config_and_inputs( self ):
        vit_model_tester = TFDeiTModelTester(self )
        bert_model_tester = TFRobertaModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values , _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __SCREAMING_SNAKE_CASE( _lowercase , unittest.TestCase ):
    def get_pretrained_model_and_inputs( self ):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            'Rocketknight1/tiny-random-clip-tf' , 'hf-internal-testing/tiny-random-bert' )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs
    def get_vision_text_model( self , vision_config , text_config ):
        vision_model = TFCLIPVisionModel(vision_config , name='vision_model' )
        text_model = TFBertModel(text_config , name='text_model' )
        return vision_model, text_model
    def prepare_config_and_inputs( self ):
        clip_model_tester = TFCLIPVisionModelTester(self )
        bert_model_tester = TFBertModelTester(self )
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class __SCREAMING_SNAKE_CASE( unittest.TestCase ):
    @slow
    def test_inference( self ):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            'clip-italian/clip-italian' , logit_scale_init_value=1.0 , from_pt=True )
        processor = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' )
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        inputs = processor(
            text=['una foto di un gatto', 'una foto di un cane'] , images=image , padding=True , return_tensors='np' )
        outputs = model(**inputs )
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
        self.assertEqual(
            outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
        expected_logits = np.array([[1.2_284_727, 0.3_104_122]] )
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , expected_logits , atol=1e-3 ) )
| 307 |
import unittest
from transformers import DonutProcessor
DONUT_PRETRAINED_MODEL_NAME = 'naver-clova-ix/donut-base'
class _a ( unittest.TestCase):
    def setUp( self )-> None:
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME )
    def test_token2json( self )-> None:
        expected_json = {
            'name': 'John Doe',
            'age': '99',
            'city': 'Atlanta',
            'state': 'GA',
            'zip': '30301',
            'phone': '123-4567',
            'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
        }
        sequence = (
            '<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
            '<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
            '<s_nicknames><s_nickname>Johnny</s_nickname>'
            '<sep/><s_nickname>JD</s_nickname></s_nicknames>'
        )
        actual_json = self.processor.token2json(sequence )
        self.assertDictEqual(actual_json , expected_json )
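# Note (added for clarity): token2json inverts Donut's XML-like output format --
# each <s_key>...</s_key> pair becomes a dict entry, and <sep/>-separated repeats
# of the same tag become a list, as in the `nicknames` field above.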
| 131 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
a_ : Optional[Any] = logging.get_logger(__name__)
class a ( CLIPImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            'The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use CLIPImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
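# Migration sketch (illustrative, not part of the original module): user code only
# needs to swap the class name, since CLIPImageProcessor is the drop-in replacement.
# The checkpoint name below is an assumption for demonstration purposes.
#
#   from transformers import CLIPImageProcessor
#   image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")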
| 104 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None , metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES )} )
    data_dir: str = field(
        default=None , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."} )
    max_seq_length: int = field(
        default=128 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    doc_stride: int = field(
        default=128 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , )
    max_query_length: int = field(
        default=64 , metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        } , )
    max_answer_length: int = field(
        default=30 , metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached training and evaluation sets"} )
    version_2_with_negative: bool = field(
        default=False , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."} )
    null_score_diff_threshold: float = field(
        default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
    n_best_size: int = field(
        default=20 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
    lang_id: int = field(
        default=0 , metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        } , )
    threads: int = field(default=1 , metadata={"help": "multiple threads for converting example to features"} )
class Split( Enum ):
    train = "train"
    dev = "dev"
class SquadDataset( Dataset ):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self ,
        args: SquadDataTrainingArguments ,
        tokenizer: PreTrainedTokenizer ,
        limit_length: Optional[int] = None ,
        mode: Union[str, Split] = Split.train ,
        is_language_sensitive: Optional[bool] = False ,
        cache_dir: Optional[str] = None ,
        dataset_format: Optional[str] = "pt" ,
    ) -> None:
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode , str ):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError('mode is not a valid split name' )
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = 'v2' if args.version_2_with_negative else 'v1'
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}' , )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + '.lock'
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file )
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features['features']
                self.dataset = self.old_features.get('dataset' , None )
                self.examples = self.old_features.get('examples' , None )
                logger.info(
                    f'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
                        ' future run' )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir )
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir )
                self.features , self.dataset = squad_convert_examples_to_features(
                    examples=self.examples , tokenizer=tokenizer , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=dataset_format , )
                start = time.time()
                torch.save(
                    {'features': self.features, 'dataset': self.dataset, 'examples': self.examples} , cached_features_file , )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ) -> List[Any]:
return len(self.features )
    def __getitem__( self , i ) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids , dtype=torch.long )
        attention_mask = torch.tensor(feature.attention_mask , dtype=torch.long )
        token_type_ids = torch.tensor(feature.token_type_ids , dtype=torch.long )
        cls_index = torch.tensor(feature.cls_index , dtype=torch.long )
        p_mask = torch.tensor(feature.p_mask , dtype=torch.float )
        is_impossible = torch.tensor(feature.is_impossible , dtype=torch.float )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': attention_mask,
            'token_type_ids': token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({'cls_index': cls_index, 'p_mask': p_mask} )
            if self.args.version_2_with_negative:
                inputs.update({'is_impossible': is_impossible} )
            if self.is_language_sensitive:
                inputs.update({'langs': (torch.ones(input_ids.shape , dtype=torch.int64 ) * self.args.lang_id)} )
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position , dtype=torch.long )
            end_positions = torch.tensor(feature.end_position , dtype=torch.long )
            inputs.update({'start_positions': start_positions, 'end_positions': end_positions} )
return inputs
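# Usage sketch (illustrative, not part of the original module; the tokenizer
# checkpoint and data_dir below are placeholder assumptions):
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
#   train_dataset = SquadDataset(args, tokenizer=tokenizer, mode="train")
#   batch = train_dataset[0]  # dict of tensors: input_ids, attention_mask, ...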
| 104 | 1 |
"""simple docstring"""
import argparse
from t5x import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM
def convert_tax_checkpoint_to_flax( tax_checkpoint_path , config_name , flax_dump_folder_path ):
    '''simple docstring'''
    config = AutoConfig.from_pretrained(config_name )
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config )
    tax_model = checkpoints.load_t5x_checkpoint(tax_checkpoint_path )
    split_mlp_wi = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp']
if config.model_type == "t5":
_UpperCAmelCase = '''SelfAttention'''
if config.model_type == "longt5" and config.encoder_attention_type == "local":
_UpperCAmelCase = '''LocalSelfAttention'''
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_UpperCAmelCase = '''TransientGlobalSelfAttention'''
else:
raise ValueError(
'''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'''
''' attribute with a value from [\'local\', \'transient-global].''' )
# Encoder
for layer_index in range(config.num_layers ):
        layer_name = f'layers_{str(layer_index )}'
# Self-Attention
_UpperCAmelCase = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel''']
_UpperCAmelCase = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel''']
_UpperCAmelCase = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel''']
_UpperCAmelCase = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel''']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_UpperCAmelCase = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale''']
# Layer Normalization
_UpperCAmelCase = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale''']
if split_mlp_wi:
_UpperCAmelCase = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
_UpperCAmelCase = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
_UpperCAmelCase = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
_UpperCAmelCase = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
_UpperCAmelCase = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
_UpperCAmelCase = flax_model.params['''encoder''']['''block'''][str(_SCREAMING_SNAKE_CASE )]['''layer''']
_UpperCAmelCase = tax_attention_key
_UpperCAmelCase = tax_attention_out
_UpperCAmelCase = tax_attention_query
_UpperCAmelCase = tax_attention_value
_UpperCAmelCase = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_UpperCAmelCase = tax_global_layer_norm
if split_mlp_wi:
_UpperCAmelCase = tax_mlp_wi_a
_UpperCAmelCase = tax_mlp_wi_a
else:
_UpperCAmelCase = tax_mlp_wi
_UpperCAmelCase = tax_mlp_wo
_UpperCAmelCase = tax_mlp_layer_norm
_UpperCAmelCase = flax_model_encoder_layer_block
# Only for layer 0:
_UpperCAmelCase = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T
_UpperCAmelCase = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_UpperCAmelCase = tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T
_UpperCAmelCase = tax_encoder_global_rel_embedding
# Assigning
_UpperCAmelCase = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale''']
_UpperCAmelCase = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
        layer_name = f'layers_{str(layer_index )}'
# Self-Attention
_UpperCAmelCase = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel''']
_UpperCAmelCase = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel''']
_UpperCAmelCase = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel''']
_UpperCAmelCase = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel''']
# Layer Normalization
_UpperCAmelCase = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][
'''scale'''
]
# Encoder-Decoder-Attention
_UpperCAmelCase = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention''']
_UpperCAmelCase = tax_enc_dec_attention_module['''key''']['''kernel''']
_UpperCAmelCase = tax_enc_dec_attention_module['''out''']['''kernel''']
_UpperCAmelCase = tax_enc_dec_attention_module['''query''']['''kernel''']
_UpperCAmelCase = tax_enc_dec_attention_module['''value''']['''kernel''']
# Layer Normalization
_UpperCAmelCase = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale''']
# MLP
if split_mlp_wi:
_UpperCAmelCase = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
_UpperCAmelCase = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
_UpperCAmelCase = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
_UpperCAmelCase = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
_UpperCAmelCase = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
_UpperCAmelCase = flax_model.params['''decoder''']['''block'''][str(_SCREAMING_SNAKE_CASE )]['''layer''']
_UpperCAmelCase = tax_attention_key
_UpperCAmelCase = tax_attention_out
_UpperCAmelCase = tax_attention_query
_UpperCAmelCase = tax_attention_value
_UpperCAmelCase = tax_pre_attention_layer_norm
_UpperCAmelCase = tax_enc_dec_attention_key
_UpperCAmelCase = tax_enc_dec_attention_out
_UpperCAmelCase = tax_enc_dec_attention_query
_UpperCAmelCase = tax_enc_dec_attention_value
_UpperCAmelCase = tax_cross_layer_norm
if split_mlp_wi:
_UpperCAmelCase = tax_mlp_wi_a
_UpperCAmelCase = tax_mlp_wi_a
else:
_UpperCAmelCase = tax_mlp_wi
_UpperCAmelCase = tax_mlp_wo
_UpperCAmelCase = txa_mlp_layer_norm
_UpperCAmelCase = flax_model_decoder_layer_block
# Decoder Normalization
_UpperCAmelCase = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale''']
_UpperCAmelCase = txa_decoder_norm
# Only for layer 0:
_UpperCAmelCase = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T
_UpperCAmelCase = tax_decoder_rel_embedding
# Token Embeddings
_UpperCAmelCase = tax_model['''target''']['''token_embedder''']['''embedding''']
_UpperCAmelCase = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
_UpperCAmelCase = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel''']
    flax_model.save_pretrained(flax_dump_folder_path )
print('''T5X Model was sucessfully converted!''' )
if __name__ == "__main__":
__A : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
)
parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
parser.add_argument(
"--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
)
__A : List[Any] = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
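# Example invocation (illustrative; the checkpoint and output paths are placeholders,
# flag names match the argparse definitions above):
#
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --config_name google/t5-v1_1-base \
#       --flax_dump_folder_path ./t5-v1_1-base-flax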
| 260 |
"""simple docstring"""
import re
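# The pattern below accepts Indian mobile numbers: an optional "+91" country code
# (optionally followed by a dash or space), an optional "0" or "91" trunk prefix,
# then ten digits starting with 7, 8 or 9.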
def indian_phone_validator( phone: str ) -> bool:
    """simple docstring"""
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$" )
    if match := re.search(pat , phone ):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator('''+918827897895'''))
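    # A few more illustrative cases; the expected results follow directly from the regex:
    print(indian_phone_validator("9876543210"))  # True: plain 10-digit mobile number
    print(indian_phone_validator("+91 9876543210"))  # True: with country code
    print(indian_phone_validator("12345"))  # False: too short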
| 153 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a :List[str] = logging.get_logger(__name__)
a :Union[str, Any] = {
"MIT/ast-finetuned-audioset-10-10-0.4593": (
"https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
),
}
class __a (PretrainedConfig):
    '''simple docstring'''
    model_type = "audio-spectrogram-transformer"
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , patch_size=16 , qkv_bias=True , frequency_stride=10 , time_stride=10 , max_length=1024 , num_mel_bins=128 , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
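# Note (added for clarity; not from the original file): the model cuts the
# (max_length x num_mel_bins) spectrogram into patch_size x patch_size patches,
# and with frequency_stride / time_stride smaller than patch_size (10 vs. 16 by
# default) neighbouring patches overlap.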
| 56 |
"""simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
a :List[Any] = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
a :str = json.load(f)
@require_torch
class __a (unittest.TestCase):
'''simple docstring'''
    def get_tokenizer( self , mname ):
        """simple docstring"""
        return FSMTTokenizer.from_pretrained(mname )
    def get_model( self , mname ):
        """simple docstring"""
        model = FSMTForConditionalGeneration.from_pretrained(mname ).to(torch_device )
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
["""en-ru""", 26.0],
["""ru-en""", 22.0],
["""en-de""", 22.0],
["""de-en""", 29.0],
] )
@slow
    def test_bleu_scores( self , pair , min_bleu_score ):
        """simple docstring"""
        mname = f'''facebook/wmt19-{pair}'''
        tokenizer = self.get_tokenizer(mname )
        model = self.get_model(mname )
        src_sentences = bleu_data[pair]["""src"""]
        tgt_sentences = bleu_data[pair]["""tgt"""]
        batch = tokenizer(src_sentences , return_tensors="""pt""" , truncation=True , padding="""longest""" ).to(torch_device )
        outputs = model.generate(
            input_ids=batch.input_ids , num_beams=8 , )
        decoded_sentences = tokenizer.batch_decode(
            outputs , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        scores = calculate_bleu(decoded_sentences , tgt_sentences )
        print(scores )
        self.assertGreaterEqual(scores["""bleu"""] , min_bleu_score )
| 56 | 1 |
'''simple docstring'''
import logging
from transformers import PretrainedConfig
lowerCamelCase__ = logging.getLogger(__name__)
lowerCamelCase__ = {
"bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class lowerCAmelCase__ ( PretrainedConfig ):
    model_type = "bertabs"
    def __init__( self , vocab_size=30_522 , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2_048 , dec_dropout=0.2 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 234 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2 as cva
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250
def main() -> None:
    """simple docstring"""
    img_paths , annos = get_dataset(LABEL_DIR , IMG_DIR )
    for index in range(NUMBER_IMAGES ):
        idxs = random.sample(range(len(annos ) ) , 4 )
        new_image , new_annos , path = update_image_and_anno(
            img_paths , annos , idxs , OUTPUT_SIZE , SCALE_RANGE , filter_scale=FILTER_TINY_SCALE , )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32 )
        file_name = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
        file_root = F'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
        cva.imwrite(F'''{file_root}.jpg''' , new_image , [cva.IMWRITE_JPEG_QUALITY, 85] )
        print(F'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = F'''{anno[0]} {x_center} {y_center} {width} {height}'''
            annos_list.append(obj )
with open(F'''{file_root}.txt''' , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def get_dataset( label_dir: str , img_dir: str ):
    """simple docstring"""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir , '''*.txt''' ) ):
        label_name = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
        with open(label_file ) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir , F'''{label_name}.jpg''' )
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('''\n''' ).split(''' ''' )
            xmin = float(obj[1] ) - float(obj[3] ) / 2
            ymin = float(obj[2] ) - float(obj[4] ) / 2
            xmax = float(obj[1] ) + float(obj[3] ) / 2
            ymax = float(obj[2] ) + float(obj[4] ) / 2
            boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
        if not boxes:
            continue
        img_paths.append(img_path )
        labels.append(boxes )
    return img_paths, labels
def update_image_and_anno( all_img_list: list , all_annos: list , idxs: list[int] , output_size: tuple[int, int] , scale_range: tuple[float, float] , filter_scale: float = 0.0 , ):
    """simple docstring"""
    output_img = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uint8 )
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1] )
    divid_point_y = int(scale_y * output_size[0] )
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs ):
        path = all_img_list[index]
        path_list.append(path )
        img_annos = all_annos[index]
        img = cva.imread(path )
        if i == 0:  # top-left
            img = cva.resize(img , (divid_point_x, divid_point_y) )
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 1:  # top-right
            img = cva.resize(img , (output_size[1] - divid_point_x, divid_point_y) )
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 2:  # bottom-left
            img = cva.resize(img , (divid_point_x, output_size[0] - divid_point_y) )
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        else:  # bottom-right
            img = cva.resize(
                img , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]
def random_chars( number_char: int ) -> str:
    """simple docstring"""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code ) for _ in range(number_char ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 93 | 0 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_12,
'facebook/dpr-ctx_encoder-multiset-base': 5_12,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 5_12,
'facebook/dpr-question_encoder-multiset-base': 5_12,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 5_12,
'facebook/dpr-reader-multiset-base': 5_12,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizer( BertTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer( BertTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
CUSTOM_DPR_READER_DOCSTRING = (
    r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. '
    r'Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
)
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self ,
        questions ,
        titles = None ,
        texts = None ,
        padding = False ,
        truncation = False ,
        max_length = None ,
        return_tensors = None ,
        return_attention_mask = None ,
        **kwargs ,
    ) -> BatchEncoding:
        '''simple docstring'''
        if titles is None and texts is None:
            return super().__call__(
                questions , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions , text_pair , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        titles = titles if not isinstance(titles , str ) else [titles]
        texts = texts if not isinstance(texts , str ) else [texts]
        n_passages = len(titles )
        questions = questions if not isinstance(questions , str ) else [questions] * n_passages
        if len(titles ) != len(texts ):
            raise ValueError(
                F'''There should be as many titles than texts but got {len(titles )} titles and {len(texts )} texts.''' )
        encoded_question_and_titles = super().__call__(questions , titles , padding=False , truncation=False )['input_ids']
        encoded_texts = super().__call__(texts , add_special_tokens=False , padding=False , truncation=False )['input_ids']
        encoded_inputs = {
            'input_ids': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles , encoded_texts )
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
            encoded_inputs['attention_mask'] = attention_mask
        return self.pad(encoded_inputs , padding=padding , max_length=max_length , return_tensors=return_tensors )
    def decode_best_spans(
        self ,
        reader_input ,
        reader_output ,
        num_spans = 16 ,
        max_answer_length = 64 ,
        num_spans_per_passage = 4 ,
    ) -> List[DPRSpanPrediction]:
        '''simple docstring'''
        input_ids = reader_input['input_ids']
        start_logits , end_logits , relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits )
        sorted_docs = sorted(range(n_passages ) , reverse=True , key=relevance_logits.__getitem__ )
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id , 2 ) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id )
            else:
                sequence_len = len(sequence_ids )
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=max_answer_length , top_spans=num_spans_per_passage , )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=doc_id , start_index=start_index , end_index=end_index , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
                if len(nbest_spans_predictions ) >= num_spans:
                    break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self ,
        start_logits ,
        end_logits ,
        max_answer_length ,
        top_spans ,
    ) -> List[DPRSpanPrediction]:
        '''simple docstring'''
        scores = []
        for start_index, start_score in enumerate(start_logits ):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
                scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        scores = sorted(scores , key=lambda x : x[1] , reverse=True )
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(F'''Wrong span indices: [{start_index}:{end_index}]''' )
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(F'''Span is too long: {length} > {max_answer_length}''' )
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index) )
            if len(chosen_span_intervals ) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer( CustomDPRReaderTokenizerMixin , BertTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
| 251 |
'''simple docstring'''
from itertools import product
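# Project Euler problem 205 ("Dice Game"): Peter rolls nine four-sided dice and
# Colin rolls six six-sided dice; we want the probability that Peter's total beats
# Colin's. The helper below enumerates all outcomes with itertools.product to build
# each player's exact frequency distribution of totals; the win count is then the
# sum over Peter's totals of frequency * (Colin outcomes strictly below that total).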
def total_frequency_distribution( sides_number: int , dice_number: int ) -> list[int]:
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    face_numbers = range(min_face_number , max_face_number + 1)
    for dice_numbers in product(face_numbers , repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1
    return totals_frequencies
def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4 , dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6 , dice_number=6)
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total , max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total])
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability , ndigits=7)
    return rounded_peter_win_probability
if __name__ == "__main__":
print(f"""{solution() = }""")
| 251 | 1 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_params( module ) -> None:
    for param in module.parameters():
        param.requires_grad = False
def get_device() -> str:
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = 'mps'
    if device == "mps":
        print(
            """WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"""
            """ errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"""
            """ with generations.""" )
    return device
def show_image( image ) -> None:
    fig = plt.imshow(image )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def get_timestamp() -> str:
    current_time = datetime.now()
    timestamp = current_time.strftime("""%H:%M:%S""" )
    return timestamp
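# Minimal usage sketch (illustrative; `model` stands in for any torch.nn.Module):
#
#   device = get_device()
#   model.to(device)
#   freeze_params(model)  # e.g. freeze a pretrained backbone before fine-tuning
#   print(f"run started at {get_timestamp()} on {device}")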
| 35 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a_ = logging.get_logger(__name__)
a_ = {
"""shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class __snake_case ( BackboneConfigMixin , PretrainedConfig ):
    """simple docstring"""
    model_type = """nat"""
    attribute_map = {
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }
    def __init__( self , patch_size=4 , num_channels=3 , embed_dim=64 , depths=[3, 4, 6, 5] , num_heads=[2, 4, 8, 16] , kernel_size=7 , mlp_ratio=3.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , initializer_range=0.0_2 , layer_norm_eps=1e-5 , layer_scale_init_value=0.0 , out_features=None , out_indices=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
| 179 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
snake_case__ : Union[str, Any] = {
'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'],
'tokenization_canine': ['CanineTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : List[str] = [
'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST',
'CanineForMultipleChoice',
'CanineForQuestionAnswering',
'CanineForSequenceClassification',
'CanineForTokenClassification',
'CanineLayer',
'CanineModel',
'CaninePreTrainedModel',
'load_tf_weights_in_canine',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
snake_case__ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 250 |
def bfs( graph: list , source: int , sink: int , parent: list ) -> bool:
    '''simple docstring'''
    visited = [False] * len(graph )
    queue = []
    queue.append(source )
    visited[source] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[sink]
def ford_fulkerson( graph: list , source: int , sink: int ) -> int:
    '''simple docstring'''
    parent = [-1] * (len(graph ))
    max_flow = 0
    while bfs(graph , source , sink , parent ):
        path_flow = float('''Inf''' )
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow , graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
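# Note (added): choosing augmenting paths with BFS makes this the Edmonds-Karp
# variant of Ford-Fulkerson, which runs in O(V * E^2). The sample network below is
# the classic CLRS max-flow example; its maximum flow is 23.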
snake_case__ : List[Any] = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source , sink = 0, 5
print(ford_fulkerson(graph, source, sink))
| 250 | 1 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A_ ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""height""", """width""", """image"""} | {"""image_latents"""}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"""image"""} | {"""image_latents"""}
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=True , )
        scheduler = DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=False , set_alpha_to_one=False , )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=False , set_alpha_to_zero=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'inverse_scheduler': inverse_scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
def lowercase ( self : str , snake_case_ : Union[str, Any] , snake_case_ : int=0 ):
_UpperCAmelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
_UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCAmelCase = Image.fromarray(np.uinta(__UpperCamelCase ) ).convert("RGB" )
if str(__UpperCamelCase ).startswith("mps" ):
_UpperCAmelCase = torch.manual_seed(__UpperCamelCase )
else:
_UpperCAmelCase = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
_UpperCAmelCase = {
'image': image,
'prompt': 'a cat and a frog',
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'decode_latents': True,
'output_type': 'numpy',
}
return inputs
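    # The three input builders above map to the pipeline's three entry points:
    # __call__ (inpainting), generate_mask, and invert.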
    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})
        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None, f"`{optional_component}` did not stay set to None after loading.", )
        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]
        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)
    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]
        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)
    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)
    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()
        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image

    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"
        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
        ).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, output_type="numpy",
        ).images[0]
        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1

    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"
        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator, num_inference_steps=25,
        ).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, num_inference_steps=25, output_type="numpy",
        ).images[0]
        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
| 22 | """simple docstring"""
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
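# With the default 30-day term this prints 1918080160, the answer to
# Project Euler problem 191.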
if __name__ == "__main__":
print(solution())
| 213 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clipseg": [
"CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPSegConfig",
"CLIPSegTextConfig",
"CLIPSegVisionConfig",
],
"processing_clipseg": ["CLIPSegProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
"CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPSegModel",
"CLIPSegPreTrainedModel",
"CLIPSegTextModel",
"CLIPSegVisionModel",
"CLIPSegForImageSegmentation",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
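# With the lazy module installed, `from transformers.models.clipseg import
# CLIPSegModel` resolves the heavy torch-backed modeling file only on first
# attribute access.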
| 360 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
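# Minimal usage sketch (illustrative, not part of the original module):
# [[2, 5], [1, 3]] has determinant 1, so its inverse is [[3, -5], [-1, 2]].
if __name__ == "__main__":
    print(inverse_of_matrix([[2.0, 5.0], [1.0, 3.0]]))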
| 76 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_mobilenet_v2': [
        'MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'MobileNetV2Config',
        'MobileNetV2OnnxConfig',
    ],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_mobilenet_v2'] = ['MobileNetV2FeatureExtractor']
    _import_structure['image_processing_mobilenet_v2'] = ['MobileNetV2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mobilenet_v2'] = [
        'MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MobileNetV2ForImageClassification',
        'MobileNetV2ForSemanticSegmentation',
        'MobileNetV2Model',
        'MobileNetV2PreTrainedModel',
        'load_tf_weights_in_mobilenet_v2',
    ]
if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 1 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
) | 112 | 0 |
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)

MODEL_BIN_FILE = "pytorch_model.bin"
@dataclasses.dataclass
class STModelArguments:
    """Arguments pertaining to which config/tokenizer/model we fine-tune from."""

    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."}, )


@dataclasses.dataclass
class STDataArguments:
    """Arguments pertaining to the data used for training and evaluation."""

    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "The name of the task to train on."}, )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )


@dataclasses.dataclass
class STTrainingArguments:
    """Arguments pertaining to the self-training loop itself."""

    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no", metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch"]'
        }, )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10, metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."}, )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0, metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        }, )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False, metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."}, )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False, metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."}, )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False, metadata={"help": "Whether to fine-tune on labeled data after pseudo training."}, )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0, metadata={"help": "Confidence threshold for pseudo-labeled data filtering."}, )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100, metadata={"help": "Maximum number of self-training iterations."}, )
    seed: Optional[int] = dataclasses.field(
        default=None, metadata={"help": "Random seed for initialization."}, )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Create pseudo-labeled data for the next self-training iteration."""
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
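# Illustrative driver (not part of the original script): a typical invocation
# of the self-training loop defined below might look like
#   selftrain("bert-base-uncased", "train.csv", "infer.csv", "output",
#             evaluation_strategy="steps", eval_file="eval.csv")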
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Self-train a pre-trained model on a downstream task."""
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, )
    logger.info(accelerator.state)
    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)

    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file."

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    initial_data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
        os.makedirs(initial_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)

    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.", model_bin_file_path, iteration, )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, "best-checkpoint")
            current_output_dir = os.path.join(current_data_dir, "stage-2")
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.", model_bin_file_path, iteration, )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)

        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")

        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d", best_iteration)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{iteration}.json"), os.path.join(output_dir, "eval_results_best-iteration.json"), )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"), os.path.join(output_dir, "eval_results_best-iteration.json"), )
| 370 |
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=3_84,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=1_28,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
    '''--preprocessing_num_workers''', type=int, default=4, help='''Number of worker processes to use for preprocessing.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        '''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
        '''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
    )

logger.info('''Training/evaluation parameters %s''', args)

args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True

engine_name = '''temp_engine/bert-fp32.engine'''
if args.fp16:
    engine_name = '''temp_engine/bert-fp16.engine'''
if args.int8:
    engine_name = '''temp_engine/bert-int8.engine'''
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
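# The serialized engine written above is read back and deserialized with
# trt.Runtime further below, just before the evaluation loop.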
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
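# h_output0 / h_output1 must be page-locked host buffers whose shapes match
# TRT output bindings 3 and 4; they are allocated further below with
# cuda.pagelocked_empty before the evaluation loop runs.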
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets['''validation'''].column_names

question_column_name = '''question''' if '''question''' in column_names else column_names[0]
context_column_name = '''context''' if '''context''' in column_names else column_names[1]
answer_column_name = '''answers''' if '''answers''' in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == '''right'''
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation="only_second" if pad_on_right else "only_first", max_length=max_seq_length, stride=args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding="max_length", )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples
eval_examples = raw_datasets['''validation''']
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc='''Running tokenizer on validation dataset''',
)

data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(['''example_id''', '''offset_mapping'''])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples, features=features, predictions=predictions, version_2_with_negative=args.version_2_with_negative, n_best_size=args.n_best_size, max_answer_length=args.max_answer_length, null_score_diff_threshold=args.null_score_diff_threshold, output_dir=args.output_dir, prefix=stage, )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''')
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(f''' Num examples = {len(eval_dataset)}''')
logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
    logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 1000 / niter))
    logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 1000))
    logger.info('''Total Number of Inference = %d''', niter)

    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f'''Evaluation metrics: {eval_metric}''')
| 306 | 0 |
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
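# Maps key fragments from the original segment-anything checkpoints to the
# names used by the transformers SAM implementation.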
KEYS_TO_MODIFY_MAPPING = {
'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in',
'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0',
'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out',
'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1',
'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm',
'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2',
'mask_downscaling.0': 'mask_embed.conv1',
'mask_downscaling.1': 'mask_embed.layer_norm1',
'mask_downscaling.3': 'mask_embed.conv2',
'mask_downscaling.4': 'mask_embed.layer_norm2',
'mask_downscaling.6': 'mask_embed.conv3',
'point_embeddings': 'point_embed',
'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding',
'image_encoder': 'vision_encoder',
'neck.0': 'neck.conv1',
'neck.1': 'neck.layer_norm1',
'neck.2': 'neck.conv2',
'neck.3': 'neck.layer_norm2',
'patch_embed.proj': 'patch_embed.projection',
'.norm': '.layer_norm',
'blocks': 'layers',
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop('pixel_mean', None)
    state_dict.pop('pixel_std', None)

    output_hypernetworks_mlps_pattern = r'.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*'

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace('layers.0', 'proj_in')
            elif layer_nb == 1:
                key = key.replace('layers.1', 'layers.0')
            elif layer_nb == 2:
                key = key.replace('layers.2', 'proj_out')

        model_state_dict[key] = value

    model_state_dict['shared_image_embedding.positional_embedding'] = model_state_dict[
        'prompt_encoder.shared_embedding.positional_embedding'
    ]
    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id='ybelkada/segment-anything'):
    checkpoint_path = hf_hub_download(model_hub_id, f'checkpoints/{model_name}.pth')

    if 'sam_vit_b' in model_name:
        config = SamConfig()
    elif 'sam_vit_l' in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(vision_config=vision_config)
    elif 'sam_vit_h' in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location='cpu')
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to('cuda')

    url = 'https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'
    raw_image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors='pt').to('cuda')
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors='pt'
        ).to('cuda')
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors='pt').to('cuda')
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors='pt'
        ).to('cuda')
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ['sam_vit_b_01ec64', 'sam_vit_h_4b8939', 'sam_vit_l_0b3195']
parser.add_argument(
'--model_name',
default='sam_vit_h_4b8939',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
parser.add_argument(
'--model_hub_id',
default='ybelkada/segment-anything',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 48 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
def __lowerCAmelCase ( self , A , A=0 ) -> List[Any]:
if str(A ).startswith('''mps''' ):
_UpperCAmelCase : Optional[int] = torch.manual_seed(A )
else:
_UpperCAmelCase : Union[str, Any] = torch.Generator(device=A ).manual_seed(A )
_UpperCAmelCase : List[str] = {
'''prompt''': '''a cat and a frog''',
'''token_indices''': [2, 5],
'''generator''': generator,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''max_iter_to_alter''': 2,
'''thresholds''': {0: 0.7},
}
return inputs
def __lowerCAmelCase ( self ) -> int:
_UpperCAmelCase : List[str] = '''cpu'''
_UpperCAmelCase : Tuple = self.get_dummy_components()
_UpperCAmelCase : int = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
_UpperCAmelCase : Dict = self.get_dummy_inputs(A )
_UpperCAmelCase : Union[str, Any] = pipe(**A ).images
_UpperCAmelCase : Tuple = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 6_4, 6_4, 3) )
_UpperCAmelCase : int = np.array(
[0.63_905_364, 0.62_897_307, 0.48_599_017, 0.5_133_624, 0.5_550_048, 0.45_769_516, 0.50_326_973, 0.5_023_139, 0.45_384_496] )
_UpperCAmelCase : Tuple = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A , 1E-3 )
def __lowerCAmelCase ( self ) -> Dict:
super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 )
def __lowerCAmelCase ( self ) -> List[str]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 )
def __lowerCAmelCase ( self ) -> List[str]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 )
def __lowerCAmelCase ( self ) -> str:
super().test_save_load_local(expected_max_difference=5E-4 )
def __lowerCAmelCase ( self ) -> Optional[int]:
super().test_save_load_optional_components(expected_max_difference=4E-4 )
@require_torch_gpu
@slow
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __lowerCAmelCase ( cls ) -> Union[str, Any]:
super().setUpClass()
torch.use_deterministic_algorithms(A )
@classmethod
def __lowerCAmelCase ( cls ) -> Optional[int]:
super().tearDownClass()
torch.use_deterministic_algorithms(A )
def __lowerCAmelCase ( self ) -> List[str]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Any = torch.manual_seed(5_1 )
_UpperCAmelCase : Optional[Any] = StableDiffusionAttendAndExcitePipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , safety_checker=A , torch_dtype=torch.floataa )
pipe.to('''cuda''' )
_UpperCAmelCase : Optional[int] = '''a painting of an elephant with glasses'''
_UpperCAmelCase : int = [5, 7]
_UpperCAmelCase : Dict = pipe(
prompt=A , token_indices=A , guidance_scale=7.5 , generator=A , num_inference_steps=5 , max_iter_to_alter=5 , output_type='''numpy''' , ).images[0]
_UpperCAmelCase : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy''' )
assert np.abs((expected_image - image).max() ) < 5E-1
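# Running these tests locally (a sketch; the exact file path may differ by diffusers version):
#
#   pytest tests/pipelines/stable_diffusion/test_stable_diffusion_attend_and_excite.py
#
# The integration test class is gated by @slow and @require_torch_gpu, so it only runs
# on a CUDA machine with RUN_SLOW=1 set in the environment.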
| 263 | 0 |
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] )-> Optional[Any]:
'''simple docstring'''
for param in module.parameters():
UpperCAmelCase__ : Union[str, Any] = False
def SCREAMING_SNAKE_CASE__ ( )-> str:
'''simple docstring'''
UpperCAmelCase__ : int = "cuda" if torch.cuda.is_available() else "cpu"
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
UpperCAmelCase__ : List[str] = "mps"
if device == "mps":
print(
"WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
" with generations." )
return device
def SCREAMING_SNAKE_CASE__ ( snake_case : int )-> int:
'''simple docstring'''
UpperCAmelCase__ : List[str] = plt.imshow(snake_case )
fig.axes.get_xaxis().set_visible(snake_case )
fig.axes.get_yaxis().set_visible(snake_case )
plt.show()
def SCREAMING_SNAKE_CASE__ ( )-> Any:
'''simple docstring'''
UpperCAmelCase__ : Any = datetime.now()
UpperCAmelCase__ : Dict = current_time.strftime("%H:%M:%S" )
return timestamp
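# Minimal usage sketch for the helpers above (the helper names were chosen during this
# cleanup, so adjust them if the surrounding project uses different ones):
#
#   device = get_device()
#   model = torch.nn.Linear(4, 4).to(device)
#   freeze_params(model)  # the layer's weights no longer receive gradients
#   print(f"[{get_timestamp()}] froze {sum(p.numel() for p in model.parameters())} params on {device}")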
| 298 |
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
_lowerCAmelCase : Union[str, Any] = {
"""snap-research/efficientformer-l1-300""": (
"""https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
),
}
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ ='''efficientformer'''
def __init__( self : List[Any] , snake_case__ : List[int] = [3, 2, 6, 4] , snake_case__ : List[int] = [4_8, 9_6, 2_2_4, 4_4_8] , snake_case__ : List[bool] = [True, True, True, True] , snake_case__ : int = 4_4_8 , snake_case__ : int = 3_2 , snake_case__ : int = 4 , snake_case__ : int = 7 , snake_case__ : int = 5 , snake_case__ : int = 8 , snake_case__ : int = 4 , snake_case__ : float = 0.0 , snake_case__ : int = 1_6 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : int = 2 , snake_case__ : int = 1 , snake_case__ : float = 0.0 , snake_case__ : int = 1 , snake_case__ : bool = True , snake_case__ : bool = True , snake_case__ : float = 1e-5 , snake_case__ : str = "gelu" , snake_case__ : float = 0.02 , snake_case__ : float = 1e-12 , snake_case__ : int = 2_2_4 , snake_case__ : float = 1e-05 , **snake_case__ : str , ):
'''simple docstring'''
super().__init__(**snake_case__ )
UpperCAmelCase__ : int = hidden_act
UpperCAmelCase__ : Optional[int] = hidden_dropout_prob
UpperCAmelCase__ : List[str] = hidden_sizes
UpperCAmelCase__ : Union[str, Any] = num_hidden_layers
UpperCAmelCase__ : int = num_attention_heads
UpperCAmelCase__ : List[Any] = initializer_range
UpperCAmelCase__ : List[Any] = layer_norm_eps
UpperCAmelCase__ : Optional[int] = patch_size
UpperCAmelCase__ : Tuple = num_channels
UpperCAmelCase__ : Optional[int] = depths
UpperCAmelCase__ : Union[str, Any] = mlp_expansion_ratio
UpperCAmelCase__ : Dict = downsamples
UpperCAmelCase__ : Any = dim
UpperCAmelCase__ : str = key_dim
UpperCAmelCase__ : List[Any] = attention_ratio
UpperCAmelCase__ : Optional[Any] = resolution
UpperCAmelCase__ : Optional[Any] = pool_size
UpperCAmelCase__ : Any = downsample_patch_size
UpperCAmelCase__ : int = downsample_stride
UpperCAmelCase__ : Dict = downsample_pad
UpperCAmelCase__ : List[Any] = drop_path_rate
UpperCAmelCase__ : Optional[Any] = num_metaad_blocks
UpperCAmelCase__ : List[str] = distillation
UpperCAmelCase__ : Dict = use_layer_scale
UpperCAmelCase__ : List[Any] = layer_scale_init_value
UpperCAmelCase__ : Optional[Any] = image_size
UpperCAmelCase__ : Optional[int] = batch_norm_eps
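# Usage sketch: EfficientFormerConfig ships with transformers, so it can be instantiated
# directly; the override values below are illustrative, not recommended settings.
#
#   from transformers import EfficientFormerConfig, EfficientFormerModel
#
#   config = EfficientFormerConfig(depths=[2, 2, 4, 2], drop_path_rate=0.1)
#   model = EfficientFormerModel(config)  # randomly initialized L1-style backbone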
| 298 | 1 |