| code (string, 82 to 53.2k chars) | code_codestyle (int64, 0 to 721) | style_context (string, 91 to 41.9k chars) | style_context_codestyle (int64, 0 to 699) | label (int64, 0 to 1) |
|---|---|---|---|---|
"""simple docstring"""
import tensorflow as tf
from ...tf_utils import shape_list
class __lowercase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=1 , _UpperCAmelCase=False , **_UpperCAmelCase ):
super().__init__(**_UpperCAmelCase )
__a : Dict = vocab_size
__a : Optional[int] = d_embed
__a : Union[str, Any] = d_proj
__a : Optional[Any] = cutoffs + [vocab_size]
__a : Any = [0] + self.cutoffs
__a : Union[str, Any] = div_val
__a : Union[str, Any] = self.cutoffs[0]
__a : List[Any] = len(self.cutoffs ) - 1
__a : Dict = self.shortlist_size + self.n_clusters
__a : int = keep_order
__a : str = []
__a : Optional[int] = []
def _lowerCamelCase ( self , _UpperCAmelCase ):
if self.n_clusters > 0:
__a : Union[str, Any] = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer='''zeros''' , trainable=_UpperCAmelCase , name='''cluster_weight''' )
__a : List[str] = self.add_weight(
shape=(self.n_clusters,) , initializer='''zeros''' , trainable=_UpperCAmelCase , name='''cluster_bias''' )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
__a : Dict = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer='''zeros''' , trainable=_UpperCAmelCase , name=f"""out_projs_._{i}""" , )
self.out_projs.append(_UpperCAmelCase )
else:
self.out_projs.append(_UpperCAmelCase )
__a : List[str] = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer='''zeros''' , trainable=_UpperCAmelCase , name=f"""out_layers_._{i}_._weight""" , )
__a : Any = self.add_weight(
shape=(self.vocab_size,) , initializer='''zeros''' , trainable=_UpperCAmelCase , name=f"""out_layers_._{i}_._bias""" , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
__a , __a : Dict = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__a : Union[str, Any] = self.d_embed // (self.div_val**i)
__a : List[Any] = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer='''zeros''' , trainable=_UpperCAmelCase , name=f"""out_projs_._{i}""" )
self.out_projs.append(_UpperCAmelCase )
__a : List[Any] = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer='''zeros''' , trainable=_UpperCAmelCase , name=f"""out_layers_._{i}_._weight""" , )
__a : List[Any] = self.add_weight(
shape=(r_idx - l_idx,) , initializer='''zeros''' , trainable=_UpperCAmelCase , name=f"""out_layers_._{i}_._bias""" , )
self.out_layers.append((weight, bias) )
super().build(_UpperCAmelCase )
@staticmethod
def _lowerCamelCase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None ):
__a : Any = x
if proj is not None:
__a : Optional[int] = tf.einsum('''ibd,ed->ibe''' , _UpperCAmelCase , _UpperCAmelCase )
return tf.einsum('''ibd,nd->ibn''' , _UpperCAmelCase , _UpperCAmelCase ) + b
@staticmethod
def _lowerCamelCase ( _UpperCAmelCase , _UpperCAmelCase ):
__a : Optional[Any] = shape_list(_UpperCAmelCase )
__a : List[str] = tf.range(lp_size[0] , dtype=target.dtype )
__a : Optional[int] = tf.stack([r, target] , 1 )
return tf.gather_nd(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=True , _UpperCAmelCase=False ):
__a : List[str] = 0
if self.n_clusters == 0:
__a : Union[str, Any] = self._logit(_UpperCAmelCase , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
__a : str = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=_UpperCAmelCase , logits=_UpperCAmelCase )
__a : Dict = tf.nn.log_softmax(_UpperCAmelCase , axis=-1 )
else:
__a : Any = shape_list(_UpperCAmelCase )
__a : Optional[Any] = []
__a : Any = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
__a , __a : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
__a : Dict = (target >= l_idx) & (target < r_idx)
__a : Any = tf.where(_UpperCAmelCase )
__a : Optional[int] = tf.boolean_mask(_UpperCAmelCase , _UpperCAmelCase ) - l_idx
if self.div_val == 1:
__a : Tuple = self.out_layers[0][0][l_idx:r_idx]
__a : Optional[Any] = self.out_layers[0][1][l_idx:r_idx]
else:
__a : Union[str, Any] = self.out_layers[i][0]
__a : Optional[int] = self.out_layers[i][1]
if i == 0:
__a : Optional[int] = tf.concat([cur_W, self.cluster_weight] , 0 )
__a : List[Any] = tf.concat([cur_b, self.cluster_bias] , 0 )
__a : Union[str, Any] = self._logit(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , self.out_projs[0] )
__a : List[str] = tf.nn.log_softmax(_UpperCAmelCase )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
__a : List[Any] = tf.boolean_mask(_UpperCAmelCase , _UpperCAmelCase )
__a : Dict = self._gather_logprob(_UpperCAmelCase , _UpperCAmelCase )
else:
__a : Any = self._logit(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , self.out_projs[i] )
__a : Optional[Any] = tf.nn.log_softmax(_UpperCAmelCase )
__a : List[str] = self.cutoffs[0] + i - 1 # No probability for the head cluster
__a : Union[str, Any] = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(_UpperCAmelCase )
if target is not None:
__a : Union[str, Any] = tf.boolean_mask(_UpperCAmelCase , _UpperCAmelCase )
__a : Optional[Any] = tf.boolean_mask(_UpperCAmelCase , _UpperCAmelCase )
__a : str = self._gather_logprob(_UpperCAmelCase , _UpperCAmelCase )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(_UpperCAmelCase , -cur_logprob , shape_list(_UpperCAmelCase ) )
__a : str = tf.concat(_UpperCAmelCase , axis=-1 )
if target is not None:
if return_mean:
__a : Tuple = tf.reduce_mean(_UpperCAmelCase )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(_UpperCAmelCase )
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference.
self.add_metric(_UpperCAmelCase , name=self.name , aggregation='''mean''' if return_mean else '''''' )
return out | 52 |
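
# Editor's usage sketch for the TFAdaptiveSoftmaxMask layer in the row above; the
# sizes and cutoff values below are illustrative assumptions, not dataset values.
#
#   softmax = TFAdaptiveSoftmaxMask(vocab_size=10000, d_embed=128, d_proj=128, cutoffs=[1000, 5000], div_val=2)
#   hidden = tf.random.normal((32, 4, 128))  # (seq_len, batch, d_proj)
#   target = tf.random.uniform((32, 4), maxval=10000, dtype=tf.int64)  # token ids
#   log_probs = softmax(hidden, target)  # (seq_len, batch, vocab_size); NLL is recorded via add_loss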
"""Bilateral filtering of a grayscale image (spatial and intensity Gaussian weights)."""
import math
import sys

import cv2
import numpy as np


def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # Apply the Gaussian function elementwise.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Build a kernel of distances from the kernel center, then apply the Gaussian.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2


def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)  # force an odd kernel size
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size


if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
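

def _demo_bilateral_filter():
    # Editor's usage sketch, assuming the function names used in the cleaned-up module
    # above; runs the filter on a small synthetic gradient instead of a file on disk.
    demo = np.tile(np.linspace(0.0, 1.0, 32, dtype="float32"), (32, 1))  # 32x32 horizontal gradient
    smoothed = bilateral_filter(demo, spatial_variance=1.0, intensity_variance=1.0, kernel_size=5)
    print(smoothed.shape)  # (32, 32); a border of kernel_size // 2 pixels stays zero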
| 627 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}


class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
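

def _demo_unispeech_config():
    # Editor's usage sketch: instantiate the configuration above with its defaults,
    # then override a couple of fields; attribute names follow the __init__ body.
    config = UniSpeechConfig()
    print(config.hidden_size)  # 768
    small = UniSpeechConfig(num_hidden_layers=6, num_attention_heads=8)
    print(small.num_hidden_layers)  # 6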
| 464 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 464 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
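
# Editor's usage sketch: invoking the conversion script from a shell. The script name
# and all paths below are hypothetical placeholders.
#
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/lxmert/model.ckpt \
#       --config_file /path/to/lxmert/config.json \
#       --pytorch_dump_path /path/to/output/pytorch_model.bin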
| 75 |
def greatest_common_divisor(x: int, y: int) -> int:
    """Euclidean GCD algorithm."""
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    """Least common multiple."""
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Returns the smallest positive number evenly divisible by all numbers from 1 to n."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


if __name__ == "__main__":
    print(f"{solution() = }")
| 540 | 0 |
"""simple docstring"""
import qiskit
def SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase ) -> qiskit.result.counts.Counts:
SCREAMING_SNAKE_CASE__ = qiskit.Aer.get_backend("aer_simulator" )
SCREAMING_SNAKE_CASE__ = qiskit.QuantumCircuit(4 , 2 )
# encode inputs in qubits 0 and 1
if bita == 1:
qc_ha.x(0 )
if bita == 1:
qc_ha.x(1 )
qc_ha.barrier()
# use cnots to write XOR of the inputs on qubit2
qc_ha.cx(0 , 2 )
qc_ha.cx(1 , 2 )
# use ccx / toffoli gate to write AND of the inputs on qubit3
qc_ha.ccx(0 , 1 , 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2 , 0 ) # extract XOR value
qc_ha.measure(3 , 1 ) # extract AND value
# Execute the circuit on the qasm simulator
SCREAMING_SNAKE_CASE__ = qiskit.execute(__UpperCAmelCase , __UpperCAmelCase , shots=1_000 )
# Return the histogram data of the results of the experiment
return job.result().get_counts(__UpperCAmelCase )
if __name__ == "__main__":
_A = half_adder(1, 1)
print(F'Half Adder Output Qubit Counts: {counts}')
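

def _demo_half_adder():
    # Editor's usage sketch: exercise the circuit on all four input pairs. Each counts
    # dict has a single two-character key; the left bit is the AND (carry) measured
    # into classical bit 1, the right bit is the XOR (sum) from classical bit 0.
    for bit0 in (0, 1):
        for bit1 in (0, 1):
            print(bit0, bit1, half_adder(bit0, bit1))
    # expected keys: '00', '01', '01', '10'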
| 717 |

"""Calculates transmitted light intensity with Malus's law: I = I0 * cos^2(theta)."""
import math


def malus_law(initial_intensity: float, angle: float) -> float:
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)


if __name__ == "__main__":
    import doctest

    doctest.testmod(name="malus_law")
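
# Editor's worked example: with initial_intensity = 100.0 and angle = 60 degrees,
# cos^2(60 deg) = 0.25, so malus_law(100.0, 60.0) evaluates to roughly 25.0.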
| 538 | 0 |
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DESCRIPTION = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
_KWARGS_DESCRIPTION = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
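

def _demo_google_bleu():
    # Editor's usage sketch: the wrapper above ultimately calls NLTK's corpus_gleu, so
    # the same score can be computed directly; the token lists are made-up examples.
    hypothesis = ["the", "cat", "sat"]
    reference = ["the", "cat", "is", "sitting"]
    score = gleu_score.corpus_gleu(
        list_of_references=[[reference]], hypotheses=[hypothesis], min_len=1, max_len=4
    )
    print(score)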
| 13 |
"""Functions and classes related to optimization (weight updates) in TensorFlow."""
import re
from typing import Callable, List, Optional, Union

import tensorflow as tf

try:
    from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
    from tensorflow.keras.optimizers import Adam


class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Applies a warmup schedule on top of a given learning rate decay schedule."""

    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }


def create_optimizer(
    init_lr: float,
    num_train_steps: int,
    num_warmup_steps: int,
    min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9,
    adam_beta2: float = 0.999,
    adam_epsilon: float = 1e-8,
    adam_clipnorm: Optional[float] = None,
    adam_global_clipnorm: Optional[float] = None,
    weight_decay_rate: float = 0.0,
    power: float = 1.0,
    include_in_weight_decay: Optional[List[str]] = None,
):
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule


class AdamWeightDecay(Adam):
    def __init__(
        self,
        learning_rate=0.001,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-7,
        amsgrad=False,
        weight_decay_rate=0.0,
        include_in_weight_decay=None,
        exclude_from_weight_decay=None,
        name="AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with WarmUp custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True


class GradientAccumulator(object):
    def __init__(self):
        """Initializes the accumulator."""
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
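

def _demo_create_optimizer():
    # Editor's usage sketch for the schedule/optimizer factory above; the step counts
    # and learning rate are illustrative assumptions.
    optimizer, lr_schedule = create_optimizer(
        init_lr=5e-5,
        num_train_steps=10_000,
        num_warmup_steps=500,
        weight_decay_rate=0.01,
    )
    print(lr_schedule(250))  # mid-warmup, roughly half of init_lr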
| 13 | 1 |
"""simple docstring"""
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config
        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")
        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")
        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())
        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        # Empty init weights function to ensure compatibility of the class in the library.
        pass

    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
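

def _demo_timm_backbone():
    # Editor's usage sketch; "resnet18" and the out_indices are assumptions about the
    # installed timm version, and pretrained weights are disabled to avoid a download.
    import torch

    config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False, out_indices=(1, 2, 3, 4))
    backbone = TimmBackbone(config)
    pixel_values = torch.randn(1, 3, 224, 224)
    outputs = backbone(pixel_values)
    print([feature_map.shape for feature_map in outputs.feature_maps])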
| 709 |
"""simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their links from a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}
    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}


def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}
    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}


def download_artifact(artifact_name, artifact_url, output_dir, token):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)


def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)
    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]
    return result


def get_all_errors(artifact_dir, job_links=None):
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors


def reduce_by_error(logs, error_filter=None):
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def get_model(test):
    """Get the model name from a test method."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None
    return test


def reduce_by_model(logs, error_filter=None):
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
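
# Editor's usage sketch: running the analysis end to end from a shell, assuming the
# module is saved as, say, get_ci_error_statistics.py; the run id and token below are
# hypothetical placeholders.
#
#   python get_ci_error_statistics.py \
#       --workflow_run_id 1234567890 \
#       --output_dir ci_errors \
#       --token "$GITHUB_TOKEN"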
| 498 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__magic_name__ = {"""configuration_yolos""": ["""YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """YolosConfig""", """YolosOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ["""YolosFeatureExtractor"""]
__magic_name__ = ["""YolosImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_yolos"] = [
"YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
"YolosForObjectDetection",
"YolosModel",
"YolosPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
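
# Editor's note on the lazy pattern above: with sys.modules[__name__] replaced by a
# _LazyModule, a submodule is imported only on first attribute access, e.g.
#
#   from transformers.models.yolos import YolosConfig               # cheap, no torch needed
#   from transformers.models.yolos import YolosForObjectDetection   # now modeling_yolos loads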
| 129 |
"""simple docstring"""
from importlib import import_module
from .logging import get_logger
__magic_name__ = get_logger(__name__)
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int=None ):
lowerCamelCase__ = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith("""__""" ):
setattr(self , SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase__ = module._original_module if isinstance(SCREAMING_SNAKE_CASE_ , _PatchedModuleObj ) else module
class SCREAMING_SNAKE_CASE__ :
snake_case = []
def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any]=None ):
lowerCamelCase__ = obj
lowerCamelCase__ = target
lowerCamelCase__ = new
lowerCamelCase__ = target.split(""".""" )[0]
lowerCamelCase__ = {}
lowerCamelCase__ = attrs or []
def __enter__( self : Dict ):
*lowerCamelCase__ , lowerCamelCase__ = self.target.split(""".""" )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
try:
lowerCamelCase__ = import_module(""".""".join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
lowerCamelCase__ = getattr(self.obj , SCREAMING_SNAKE_CASE_ )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(SCREAMING_SNAKE_CASE_ , _PatchedModuleObj ) and obj_attr._original_module is submodule)
):
lowerCamelCase__ = obj_attr
# patch at top level
setattr(self.obj , SCREAMING_SNAKE_CASE_ , _PatchedModuleObj(SCREAMING_SNAKE_CASE_ , attrs=self.attrs ) )
lowerCamelCase__ = getattr(self.obj , SCREAMING_SNAKE_CASE_ )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , _PatchedModuleObj(getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , attrs=self.attrs ) )
lowerCamelCase__ = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# finally set the target attribute
setattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
lowerCamelCase__ = getattr(import_module(""".""".join(SCREAMING_SNAKE_CASE_ ) ) , SCREAMING_SNAKE_CASE_ )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , SCREAMING_SNAKE_CASE_ ) is attr_value:
lowerCamelCase__ = getattr(self.obj , SCREAMING_SNAKE_CASE_ )
setattr(self.obj , SCREAMING_SNAKE_CASE_ , self.new )
elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open"
lowerCamelCase__ = globals()["""__builtins__"""][target_attr]
setattr(self.obj , SCREAMING_SNAKE_CASE_ , self.new )
else:
raise RuntimeError(f"""Tried to patch attribute {target_attr} instead of a submodule.""" )
def __exit__( self : Optional[int] , *SCREAMING_SNAKE_CASE_ : Tuple ):
for attr in list(self.original ):
setattr(self.obj , SCREAMING_SNAKE_CASE_ , self.original.pop(SCREAMING_SNAKE_CASE_ ) )
def __UpperCAmelCase ( self : List[Any] ):
self.__enter__()
self._active_patches.append(self )
def __UpperCAmelCase ( self : str ):
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
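

def _demo_patch_submodule():
    # Editor's usage sketch: patch `os.path.join` as seen from a throwaway module
    # object; everything here is illustrative.
    import os
    import types

    def fake_join(*parts):
        return "<patched>"

    mod = types.ModuleType("demo_mod")
    mod.os = os  # the module "imported os"
    with patch_submodule(mod, "os.path.join", fake_join):
        print(mod.os.path.join("a", "b"))  # <patched>
    print(mod.os.path.join("a", "b"))  # a/b, the original attribute is restored on exit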
| 129 | 1 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(770)
new_layer_name_dict = {
'''c_attn''': '''att_proj''',
'''c_proj''': '''out_proj''',
'''c_fc''': '''in_proj''',
'''transformer.''': '''''',
'''h.''': '''layers.''',
'''ln_1''': '''layernorm_1''',
'''ln_2''': '''layernorm_2''',
'''ln_f''': '''layernorm_final''',
'''wpe''': '''position_embeds_layer''',
'''wte''': '''input_embeds_layer''',
}
REMOTE_MODEL_PATHS = {
'''text_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text.pt''',
},
'''coarse_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse.pt''',
},
'''fine_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine.pt''',
},
'''text''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text_2.pt''',
},
'''coarse''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse_2.pt''',
},
'''fine''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine_2.pt''',
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False) -> str:
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])
def _download(from_hf_path, file_name):
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")
    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]
    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)
    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params/1e6, 1)}M params, {round(val_loss, 3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model
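# Hedged illustration of the checkpoint "fixup" performed in `_load_model` above:
# strip the torch.compile prefix, then rename layers via `new_layer_name_dict`.
# Toy keys only; the real mapping is defined at the top of this script.
_toy_state = {"_orig_mod.transformer.h.0.ln_1.weight": 0}
_fixed = {}
for _k, _v in _toy_state.items():
    if _k.startswith("_orig_mod."):
        _k = _k[len("_orig_mod.") :]
    for _old in new_layer_name_dict:
        _k = _k.replace(_old, new_layer_name_dict[_old])
    _fixed[_k] = _v
assert _fixed == {"layers.0.layernorm_1.weight": 0}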
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    device = "cpu"  # do conversion on cpu
    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)
    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)
    if model_type == "text":
        bark_model = bark_model["model"]
    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")
    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10
    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]
        output_new_model_total = model(vec)
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)
        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)
        output_new_model = output_new_model_total.logits
    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)
    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")
    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")
    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig
    )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config
    )
    bark = BarkModel(bark_config)
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''')
    args = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
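    # Example invocation (hedged: the filename `convert_suno_to_hf.py` is an assumption):
    #   python convert_suno_to_hf.py text ./bark_text_hf --is_small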
| 709 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
"""simple docstring"""
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False
    def setUp(self):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
"""simple docstring"""
_lowercase = "lower newer"
_lowercase = "lower newer"
return input_text, output_text
    def test_full_tokenizer(self):
"""simple docstring"""
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@slow
    def test_sequence_builders(self):
"""simple docstring"""
_lowercase = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
_lowercase = tokenizer.encode("sequence builders" , add_special_tokens=__A )
_lowercase = tokenizer.encode("multi-sequence build" , add_special_tokens=__A )
_lowercase = tokenizer.build_inputs_with_special_tokens(__A )
_lowercase = tokenizer.build_inputs_with_special_tokens(__A , __A )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
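# Hedged sketch of the BPE behaviour the test above exercises: with merges for
# "lo", "low" and "er</w>", the word "lower" tokenizes to ["low", "er</w>"].
# Toy greedy re-implementation (real BPE applies merges by rank, not leftmost-first):
def _toy_bpe(word, merges):
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    merged = True
    while merged:
        merged = False
        for i in range(len(symbols) - 1):
            if symbols[i] + symbols[i + 1] in merges:
                symbols[i : i + 2] = [symbols[i] + symbols[i + 1]]
                merged = True
                break
    return symbols


assert _toy_bpe("lower", {"lo", "low", "er</w>"}) == ["low", "er</w>"]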
| 602 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class DetaConfig(PretrainedConfig):
'''simple docstring'''
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(self, backbone_config=None, num_queries=900, max_position_embeddings=2048, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", num_feature_levels=5, encoder_n_points=4, decoder_n_points=4, two_stage=True, two_stage_num_proposals=300, with_box_refine=True, assign_first_stage=True, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, **kwargs):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def num_attention_heads(self) -> int:
"""simple docstring"""
return self.encoder_attention_heads
@property
    def hidden_size(self) -> int:
"""simple docstring"""
return self.d_model
    def to_dict(self):
        """Serializes this instance to a Python dictionary, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
return output
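# Hedged mini-demo of the `attribute_map` aliasing declared on the config above
# (PretrainedConfig resolves "hidden_size" to "d_model"; this toy mimics that rule):
class _AliasedConfig:
    attribute_map = {"hidden_size": "d_model"}

    def __init__(self, d_model=256):
        self.d_model = d_model

    def __getattr__(self, name):
        # only reached when normal lookup fails, e.g. for the "hidden_size" alias
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)


assert _AliasedConfig().hidden_size == 256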
| 376 | '''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
"""simple docstring"""
__A = ["image_processor", "tokenizer"]
__A = "FlavaImageProcessor"
__A = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: Optional[ImageInput] = None, text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = False, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_image_mask: Optional[bool] = None, return_codebook_pixels: Optional[bool] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')
        if text is not None:
            encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images, return_image_mask=return_image_mask, return_codebook_pixels=return_codebook_pixels, return_tensors=return_tensors, **kwargs,
            )
        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
@property
    def feature_extractor_class(self):
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __lowerCAmelCase , )
return self.image_processor_class
@property
    def feature_extractor(self):
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __lowerCAmelCase , )
return self.image_processor
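# Hedged usage sketch of the processor above (requires downloading the
# "facebook/flava-full" checkpoint, so it is left as an illustrative comment):
#   processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")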
| 309 | 0 |
'''simple docstring'''
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default) -> int:
    """Returns the first non-negative env value found in the `env_keys` list, or the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False) -> bool:
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no") -> str:
    value = os.environ.get(key, str(default))
    return value
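# Quick usage sketch of the helpers above (exercises only the default paths, so
# it does not depend on the caller's environment):
os.environ.pop("DUMMY_FLAG", None)
assert parse_flag_from_env("DUMMY_FLAG", default=False) is False
assert get_int_from_env(["MISSING_A", "MISSING_B"], 7) == 7
assert parse_choice_from_env("MISSING_CHOICE", default="no") == "no"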
| 301 |
'''simple docstring'''
def is_pentagonal(n: int) -> bool:
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5_000) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
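# Sanity check for the inverse test above: P(4) = 4 * 11 / 2 = 22 is pentagonal,
# while 23 is not.
assert is_pentagonal(22) and not is_pentagonal(23)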
if __name__ == "__main__":
print(F"{solution() = }")
| 301 | 1 |
def kinetic_energy(mass: float, velocity: float) -> float:
    if mass < 0:
        raise ValueError('The mass of a body cannot be negative')
    return 0.5 * mass * abs(velocity) * abs(velocity)
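# Worked example: a 10 kg mass moving at 5 m/s carries 0.5 * 10 * 5 * 5 = 125 J.
assert kinetic_energy(10, 5) == 125.0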
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 312 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def __a ( self : List[Any] ):
'''simple docstring'''
__a = pipeline(
task="""zero-shot-audio-classification""" , model="""hf-internal-testing/tiny-clap-htsat-unfused""" )
__a = load_dataset("""ashraq/esc50""" )
__a = dataset["""train"""]["""audio"""][-1]["""array"""]
__a = audio_classifier(SCREAMING_SNAKE_CASE__ , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{"""score""": 0.5_0_1, """label""": """Sound of a dog"""}, {"""score""": 0.4_9_9, """label""": """Sound of vaccum cleaner"""}] , )
@unittest.skip("""No models are available in TF""" )
def __a ( self : str ):
'''simple docstring'''
pass
@slow
@require_torch
def __a ( self : Dict ):
'''simple docstring'''
__a = pipeline(
task="""zero-shot-audio-classification""" , model="""laion/clap-htsat-unfused""" , )
# This is an audio of a dog
__a = load_dataset("""ashraq/esc50""" )
__a = dataset["""train"""]["""audio"""][-1]["""array"""]
__a = audio_classifier(SCREAMING_SNAKE_CASE__ , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , [
{"""score""": 0.9_9_9, """label""": """Sound of a dog"""},
{"""score""": 0.0_0_1, """label""": """Sound of vaccum cleaner"""},
] , )
__a = audio_classifier([audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , [
[
{"""score""": 0.9_9_9, """label""": """Sound of a dog"""},
{"""score""": 0.0_0_1, """label""": """Sound of vaccum cleaner"""},
],
]
* 5 , )
__a = audio_classifier(
[audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] , batch_size=5 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , [
[
{"""score""": 0.9_9_9, """label""": """Sound of a dog"""},
{"""score""": 0.0_0_1, """label""": """Sound of vaccum cleaner"""},
],
]
* 5 , )
@unittest.skip("""No models are available in TF""" )
def __a ( self : str ):
'''simple docstring'''
pass
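# Hedged usage sketch of the pipeline exercised above (downloads a model and a
# dataset, hence the guard; the checkpoint and labels are taken from the slow test):
if __name__ == "__main__":
    classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
    audio = load_dataset("ashraq/esc50")["train"]["audio"][-1]["array"]
    print(classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"]))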
| 582 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class _lowercase ( unittest.TestCase ):
def UpperCamelCase ( self ) -> Tuple:
snake_case = 10
def UpperCamelCase ( self ) -> int:
snake_case = [1, 2, 3, 4]
snake_case = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(A__ , self.block_size , 0 ) , A__ )
def UpperCamelCase ( self ) -> List[str]:
snake_case = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
snake_case = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(A__ , self.block_size , 0 ) , A__ )
def UpperCamelCase ( self ) -> List[Any]:
snake_case = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
snake_case = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(A__ , self.block_size , 0 ) , A__ )
def UpperCamelCase ( self ) -> Optional[int]:
snake_case = '''It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this.'''
snake_case , snake_case = process_story(A__ )
self.assertEqual(A__ , [] )
def UpperCamelCase ( self ) -> Union[str, Any]:
snake_case = ''''''
snake_case , snake_case = process_story(A__ )
self.assertEqual(A__ , [] )
self.assertEqual(A__ , [] )
def UpperCamelCase ( self ) -> Any:
snake_case = (
'''It was the year of Our Lord one thousand seven hundred and '''
'''seventy-five\n\nSpiritual revelations were conceded to England '''
'''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
)
snake_case , snake_case = process_story(A__ )
snake_case = [
'''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
'''Spiritual revelations were conceded to England at that favoured period, as at this.''',
]
self.assertEqual(A__ , A__ )
snake_case = ['''It was the best of times.''']
self.assertEqual(A__ , A__ )
def UpperCamelCase ( self ) -> str:
snake_case = torch.tensor([1, 2, 3, 4] )
snake_case = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(A__ , 0 ).numpy() , expected.numpy() )
def UpperCamelCase ( self ) -> Optional[Any]:
snake_case = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
snake_case = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(A__ , 23 ).numpy() , expected.numpy() )
def UpperCamelCase ( self ) -> Optional[int]:
snake_case = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
snake_case = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(A__ , 1 ).numpy() , expected.numpy() )
def UpperCamelCase ( self ) -> Tuple:
snake_case = 1_01
snake_case = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_01, 5, 6], [1, 1_01, 3, 4, 1_01, 6]] )
snake_case = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
snake_case = compute_token_type_ids(A__ , A__ )
np.testing.assert_array_equal(A__ , A__ )
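# Hedged note on compute_token_type_ids, inferred from the expected tensors in
# the last test: the segment id flips each time the separator token (101 here)
# appears, starting from 1. A toy re-implementation of that rule:
def _toy_token_type_ids(row, sep):
    out, sentence_num = [], -1
    for tok in row:
        if tok == sep:
            sentence_num += 1
        out.append(sentence_num % 2)
    return out


assert _toy_token_type_ids([1, 101, 3, 4, 101, 6], 101) == [1, 0, 0, 0, 1, 1]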
| 44 |
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument('-f')
    args = parser.parse_args()
    return args.f
def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, 'r') as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
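# Hedged mini-demo of the results loader above (writes a throwaway json file):
import tempfile

_tmp_dir = tempfile.mkdtemp()
with open(os.path.join(_tmp_dir, "eval_results.json"), "w") as _f:
    json.dump({"eval_accuracy": 1.0}, _f)
assert get_results(_tmp_dir)["eval_accuracy"] == 1.0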
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
def UpperCamelCase ( self ) -> List[str]:
snake_case = self.get_auto_remove_tmp_dir()
snake_case = F"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(A__ , '''argv''' , A__ ):
run_flax_glue.main()
snake_case = get_results(A__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 )
@slow
def UpperCamelCase ( self ) -> List[Any]:
snake_case = self.get_auto_remove_tmp_dir()
snake_case = F"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(A__ , '''argv''' , A__ ):
run_clm_flax.main()
snake_case = get_results(A__ )
self.assertLess(result['''eval_perplexity'''] , 1_00 )
@slow
def UpperCamelCase ( self ) -> int:
snake_case = self.get_auto_remove_tmp_dir()
snake_case = F"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(A__ , '''argv''' , A__ ):
run_summarization_flax.main()
snake_case = get_results(A__ , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def UpperCamelCase ( self ) -> Union[str, Any]:
snake_case = self.get_auto_remove_tmp_dir()
snake_case = F"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(A__ , '''argv''' , A__ ):
run_mlm_flax.main()
snake_case = get_results(A__ )
self.assertLess(result['''eval_perplexity'''] , 42 )
@slow
def UpperCamelCase ( self ) -> Dict:
snake_case = self.get_auto_remove_tmp_dir()
snake_case = F"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(A__ , '''argv''' , A__ ):
run_ta_mlm_flax.main()
snake_case = get_results(A__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.4_2 )
@slow
def UpperCamelCase ( self ) -> int:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
snake_case = 7 if get_gpu_count() > 1 else 2
snake_case = self.get_auto_remove_tmp_dir()
snake_case = F"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(A__ , '''argv''' , A__ ):
run_flax_ner.main()
snake_case = get_results(A__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def UpperCamelCase ( self ) -> Any:
snake_case = self.get_auto_remove_tmp_dir()
snake_case = F"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(A__ , '''argv''' , A__ ):
run_qa.main()
snake_case = get_results(A__ )
self.assertGreaterEqual(result['''eval_f1'''] , 30 )
self.assertGreaterEqual(result['''eval_exact'''] , 30 )
| 44 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.speecht5')
MAPPING_SPEECH_ENCODER_PRENET = {
'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm',
'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection',
'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv',
'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed',
}
MAPPING_TEXT_ENCODER_PRENET = {
'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens',
'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha',
}
MAPPING_SPEECH_DECODER_PRENET = {
'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0',
'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1',
'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer',
'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha',
'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer',
}
MAPPING_SPEECH_DECODER_POSTNET = {
'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out',
'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out',
'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv',
'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm',
'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv',
'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm',
'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv',
'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm',
'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv',
'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm',
'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv',
'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm',
}
MAPPING_TEXT_DECODER_PRENET = {
'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens',
}
MAPPING_TEXT_DECODER_POSTNET = {
'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head',
}
MAPPING_ENCODER = {
'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj',
'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj',
'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj',
'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj',
'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm',
'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense',
'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense',
'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm',
'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k',
}
MAPPING_DECODER = {
'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj',
'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj',
'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj',
'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj',
'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm',
'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj',
'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj',
'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj',
'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj',
'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm',
'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense',
'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense',
'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm',
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
'encoder.version',
'encoder.layers.*.norm_k.weight',
'encoder.layers.*.norm_k.bias',
'decoder.version',
'decoder.layers.*.norm_k.weight',
'decoder.layers.*.norm_k.bias',
'decoder.pos_emb.pe_k',
'speech_encoder_prenet.embed_positions._float_tensor',
'text_decoder_prenet.embed_positions._float_tensor',
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'speech_decoder_prenet.*',
'speech_decoder_postnet.*',
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
'encoder.proj',
'speech_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f' {value.shape} for {full_name}' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
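# Hedged mini-demo of the recursive attribute walk above, with plain objects
# standing in for torch modules:
class _Node:
    pass


_root = _Node()
_root.child = _Node()
_root.child.weight = "w"
_ptr = _root
for _attribute in "child.weight".split("."):
    _ptr = getattr(_ptr, _attribute)
assert _ptr == "w"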
def should_ignore(name, ignore_keys):
'''simple docstring'''
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
            prefix, suffix = key.split('.*.')
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
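# Quick check of the wildcard rules above: a trailing ".*" is a prefix match,
# an infix ".*." requires both prefix and suffix, anything else is a substring:
assert should_ignore("speech_decoder_prenet.layers.0", ["speech_decoder_prenet.*"])
assert should_ignore("encoder.layers.3.norm_k.bias", ["encoder.layers.*.norm_k.bias"])
assert not should_ignore("encoder.proj.weight", ["decoder.*"])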
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f'Unsupported task: {task}' )
    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f'{name} was ignored' )
            continue
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_encoder, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split('.*.')
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    elif "running_mean" in name:
                        weight_type = 'running_mean'
                    elif "running_var" in name:
                        weight_type = 'running_var'
                    elif "num_batches_tracked" in name:
                        weight_type = 'num_batches_tracked'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'Unused weights: {unused_weights}' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0] )
    type_id = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speechta_checkpoint(task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None, ):
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path)
    else:
        config = SpeechTaConfig()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config)
    else:
        raise ValueError(f'Unknown task name: {task}' )
    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path, model_max_length=config.max_text_positions)
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken('<mask>', lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({'mask_token': mask_token} )
        tokenizer.add_tokens(['<ctc_blank>'] )
    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)
    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint['model'], model, task)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print('Pushing to the hub...')
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--task',
default='s2t',
type=str,
help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
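    # Example invocation (hedged: the script filename is an assumption):
    #   python convert_speecht5_checkpoint.py --task t2s --checkpoint_path ./speecht5_tts.pt \
    #       --vocab_path ./spm_char.model --pytorch_dump_folder_path ./speecht5_tts_hf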
| 22 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
def UpperCAmelCase__ ( self :int ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase__ : int =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
lowerCamelCase__ : Optional[int] =DDIMScheduler()
torch.manual_seed(0 )
lowerCamelCase__ : Any =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCamelCase__ : Union[str, Any] =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
lowerCamelCase__ : Union[str, Any] =CLIPTextModel(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCamelCase__ : Optional[int] ={
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCAmelCase__ ( self :Dict , lowerCamelCase_ :List[str] , lowerCamelCase_ :List[str]=0 ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] =torch.manual_seed(lowerCamelCase_ )
lowerCamelCase__ : int ={
'prompt': 'a photo of the dolomites',
'generator': generator,
# Setting height and width to None to prevent OOMs on CPU.
'height': None,
'width': None,
'num_inference_steps': 1,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def UpperCAmelCase__ ( self :int ):
"""simple docstring"""
lowerCamelCase__ : List[Any] ='cpu' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ : Optional[Any] =self.get_dummy_components()
lowerCamelCase__ : int =StableDiffusionPanoramaPipeline(**lowerCamelCase_ )
lowerCamelCase__ : List[str] =sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] =self.get_dummy_inputs(lowerCamelCase_ )
lowerCamelCase__ : Tuple =sd_pipe(**lowerCamelCase_ ).images
lowerCamelCase__ : Optional[Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase__ : List[str] =np.array([0.61_86, 0.53_74, 0.49_15, 0.41_35, 0.41_14, 0.45_63, 0.51_28, 0.49_77, 0.47_57] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase__ ( self :Union[str, Any] ):
"""simple docstring"""
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCAmelCase__ ( self :str ):
"""simple docstring"""
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25e-3 )
def UpperCAmelCase__ ( self :Optional[int] ):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] ='cpu' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ : str =self.get_dummy_components()
lowerCamelCase__ : int =StableDiffusionPanoramaPipeline(**lowerCamelCase_ )
lowerCamelCase__ : int =sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] =self.get_dummy_inputs(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] ='french fries'
lowerCamelCase__ : Optional[Any] =sd_pipe(**lowerCamelCase_ , negative_prompt=lowerCamelCase_ )
lowerCamelCase__ : Any =output.images
lowerCamelCase__ : Optional[Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase__ : List[str] =np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase__ ( self :Optional[Any] ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] ='cpu' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ : int =self.get_dummy_components()
lowerCamelCase__ : Any =StableDiffusionPanoramaPipeline(**lowerCamelCase_ )
lowerCamelCase__ : int =sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Any =self.get_dummy_inputs(lowerCamelCase_ )
lowerCamelCase__ : int =sd_pipe(**lowerCamelCase_ , view_batch_size=2 )
lowerCamelCase__ : Dict =output.images
lowerCamelCase__ : List[str] =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase__ : List[str] =np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase__ ( self :Union[str, Any] ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] ='cpu' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ : List[Any] =self.get_dummy_components()
lowerCamelCase__ : Any =EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' )
lowerCamelCase__ : Optional[Any] =StableDiffusionPanoramaPipeline(**lowerCamelCase_ )
lowerCamelCase__ : Tuple =sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Dict =self.get_dummy_inputs(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] =sd_pipe(**lowerCamelCase_ ).images
lowerCamelCase__ : Any =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase__ : Optional[int] =np.array([0.40_24, 0.65_10, 0.49_01, 0.53_78, 0.58_13, 0.56_22, 0.47_95, 0.44_67, 0.49_52] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase__ ( self :List[Any] ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] ='cpu' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase__ : List[Any] =self.get_dummy_components()
lowerCamelCase__ : Union[str, Any] =PNDMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , skip_prk_steps=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] =StableDiffusionPanoramaPipeline(**lowerCamelCase_ )
lowerCamelCase__ : int =sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Dict =self.get_dummy_inputs(lowerCamelCase_ )
lowerCamelCase__ : Any =sd_pipe(**lowerCamelCase_ ).images
lowerCamelCase__ : List[Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase__ : str =np.array([0.63_91, 0.62_91, 0.48_61, 0.51_34, 0.55_52, 0.45_78, 0.50_32, 0.50_23, 0.45_39] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self :Tuple ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self :List[Any] , lowerCamelCase_ :Tuple=0 ):
"""simple docstring"""
lowerCamelCase__ : Any =torch.manual_seed(lowerCamelCase_ )
lowerCamelCase__ : Dict ={
'prompt': 'a photo of the dolomites',
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def UpperCAmelCase__ ( self :str ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] ='stabilityai/stable-diffusion-2-base'
lowerCamelCase__ : Tuple =DDIMScheduler.from_pretrained(lowerCamelCase_ , subfolder='scheduler' )
lowerCamelCase__ : Dict =StableDiffusionPanoramaPipeline.from_pretrained(lowerCamelCase_ , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing()
lowerCamelCase__ : List[Any] =self.get_inputs()
lowerCamelCase__ : List[Any] =pipe(**lowerCamelCase_ ).images
lowerCamelCase__ : int =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2_048, 3)
lowerCamelCase__ : Dict =np.array(
[
0.36_96_83_92,
0.27_02_53_72,
0.32_44_67_66,
0.28_37_93_87,
0.36_36_32_74,
0.30_73_33_47,
0.27_10_00_27,
0.27_05_41_25,
0.25_53_60_96,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def UpperCAmelCase__ ( self :Dict ):
"""simple docstring"""
lowerCamelCase__ : Tuple =StableDiffusionPanoramaPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-base' , safety_checker=lowerCamelCase_ )
lowerCamelCase__ : Dict =LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing()
lowerCamelCase__ : Optional[int] =self.get_inputs()
lowerCamelCase__ : Optional[Any] =pipe(**lowerCamelCase_ ).images
lowerCamelCase__ : Optional[Any] =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2_048, 3)
lowerCamelCase__ : int =np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def UpperCAmelCase__ ( self :List[str] ):
"""simple docstring"""
lowerCamelCase__ : str =0
def callback_fn(lowerCamelCase_ :int , lowerCamelCase_ :int , lowerCamelCase_ :torch.FloatTensor ) -> None:
lowerCamelCase__ : int =True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowerCamelCase__ : Any =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowerCamelCase__ : Optional[Any] =latents[0, -3:, -3:, -1]
lowerCamelCase__ : Union[str, Any] =np.array(
[
0.18_68_18_69,
0.33_90_78_16,
0.5_36_12_76,
0.14_43_28_65,
-0.02_85_66_11,
-0.73_94_11_23,
0.23_39_79_87,
0.47_32_26_82,
-0.37_82_31_64,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
lowerCamelCase__ : Union[str, Any] =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowerCamelCase__ : int =latents[0, -3:, -3:, -1]
lowerCamelCase__ : List[Any] =np.array(
[
0.18_53_96_45,
0.33_98_72_48,
0.5_37_85_59,
0.14_43_71_42,
-0.02_45_52_61,
-0.7_33_83_17,
0.23_99_07_55,
0.47_35_62_72,
-0.3_78_65_05,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
lowerCamelCase__ : Optional[int] =False
lowerCamelCase__ : Optional[int] ='stabilityai/stable-diffusion-2-base'
lowerCamelCase__ : Union[str, Any] =DDIMScheduler.from_pretrained(lowerCamelCase_ , subfolder='scheduler' )
lowerCamelCase__ : int =StableDiffusionPanoramaPipeline.from_pretrained(lowerCamelCase_ , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ )
lowerCamelCase__ : Any =pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
pipe.enable_attention_slicing()
lowerCamelCase__ : Optional[int] =self.get_inputs()
pipe(**lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        model_ckpt = 'stabilityai/stable-diffusion-2-base'
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder='scheduler')
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9 | 174 | 0
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor of the given shape, as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
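# Quick illustration of the helper above (the values are random, so only the
# shape is deterministic):
#
#   batch = floats_list((2, 3), scale=2.0)
#   len(batch), len(batch[0])  # -> (2, 3); each entry lies in [0.0, 2.0)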
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44_100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        return {
            'spectrogram_length': self.spectrogram_length,
            'feature_size': self.feature_size,
            'num_audio_channels': self.num_audio_channels,
            'hop_length': self.hop_length,
            'chunk_length': self.chunk_length,
            'sampling_rate': self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, 'spectrogram_length'))
        self.assertTrue(hasattr(feature_extractor, 'feature_size'))
        self.assertTrue(hasattr(feature_extractor, 'num_audio_channels'))
        self.assertTrue(hasattr(feature_extractor, 'hop_length'))
        self.assertTrue(hasattr(feature_extractor, 'chunk_length'))
        self.assertTrue(hasattr(feature_extractor, 'sampling_rate'))

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop('mel_filters')
        mel_2 = dict_second.pop('mel_filters')
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, 'feat_extract.json')
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop('mel_filters')
        mel_2 = dict_second.pop('mel_filters')
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)

        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors='np', sampling_rate=44_100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors='np', sampling_rate=44_100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors='np', sampling_rate=44_100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors='np', sampling_rate=44_100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

    def _load_datasamples(self, num_samples):
        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy', 'clean', split='validation')
        # automatic decoding with librispeech
        speech_samples = ds.sort('id').select(range(num_samples))[:num_samples]['audio']

        return [x['array'] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors='pt').audio_values

        self.assertEqual(audio_values.shape, (1, 1, 192, 128))

        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
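    # Shape note for the integration test above: `audio_values` comes back as
    # (batch, num_audio_channels, time_frames, feature_size), i.e.
    # (1, 1, 192, 128) for a single LibriSpeech sample at default settings.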
| 470 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""enhancement""",
"""new pipeline/model""",
"""new scheduler""",
"""wip""",
]
def main() -> None:
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/diffusers')
    open_issues = repo.get_issues(state='open')

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
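# A dry-run sketch (hypothetical helper, not part of the original script):
# print the issues the bot *would* flag instead of editing them. It assumes
# the same GITHUB_TOKEN environment variable as main().
#
#   def dry_run() -> None:
#       repo = Github(os.environ['GITHUB_TOKEN']).get_repo('huggingface/diffusers')
#       for issue in repo.get_issues(state='open'):
#           inactive_days = (dt.utcnow() - issue.updated_at).days
#           if inactive_days > 23 and not any(
#               label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels()
#           ):
#               print(f'would flag #{issue.number}: {issue.title} ({inactive_days}d inactive)')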
| 470 | 1 |
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained('junnyu/roformer_chinese_base', **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained('junnyu/roformer_chinese_base', **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = '永和服装饰品有限公司,今天天气非常好'
        output_text = '永和 服装 饰品 有限公司 , 今 天 天 气 非常 好'
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22_943, 21_332, 34_431, 45_904, 117, 306, 1_231, 1_231, 2_653, 33_994, 1_266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22_943, 21_332, 34_431, 45_904, 117, 306, 1_231, 1_231, 2_653, 33_994, 1_266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # These common-tester cases do not apply to RoFormer and are skipped.
    def test_training_new_tokenizer(self):
        pass

    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
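    # A minimal usage sketch (assumes the Hub checkpoint and `rjieba` are
    # available): the tokenizer pre-segments Chinese text with jieba before
    # applying the vocab, which is exactly what the expected outputs above encode.
    #
    #   tok = RoFormerTokenizer.from_pretrained('junnyu/roformer_chinese_base')
    #   tok.tokenize('永和服装饰品有限公司,今天天气非常好')
    #   # -> ['永和', '服装', '饰品', '有限公司', ',', '今', '天', '天', '气', '非常', '好']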
| 580 |
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h')
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()
            config_dict.pop('feature_extractor_type')
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue('_processor_class' not in dict_as_saved)

        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, 'bert-base is not a local folder and is not a valid model identifier'
        ):
            _ = AutoFeatureExtractor.from_pretrained('bert-base')

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision='aaaaaa')

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.',
        ):
            _ = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model')
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
with self.assertRaises(snake_case ):
lowerCAmelCase__ : Tuple = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(snake_case ):
lowerCAmelCase__ : str = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=snake_case )
lowerCAmelCase__ : Optional[int] = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=snake_case )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(snake_case )
lowerCAmelCase__ : Optional[Any] = AutoFeatureExtractor.from_pretrained(snake_case , trust_remote_code=snake_case )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register('custom', CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register('custom', CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor')
            self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor')
            self.assertTrue(feature_extractor.is_local)

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor', trust_remote_code=False)
            self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor')
            self.assertTrue(feature_extractor.is_local)

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor', trust_remote_code=True)
            self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor')
            self.assertTrue(not hasattr(feature_extractor, 'is_local'))
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
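    # Precedence summary for the three cases above: with no flag, the locally
    # registered class wins; `trust_remote_code=False` also forces the local
    # class; `trust_remote_code=True` loads the Hub version, which no longer
    # carries the local-only `is_local` attribute.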
| 453 | 0 |
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402

# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, 'src', 'transformers')

DUMMY_CONSTANT = '\n{0} = None\n'

DUMMY_CLASS = '\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n'

DUMMY_FUNCTION = '\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n'
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend('    if not is_tokenizers_available():')
        self.assertEqual(simple_backend, 'tokenizers')

        backend_with_underscore = find_backend('    if not is_tensorflow_text_available():')
        self.assertEqual(backend_with_underscore, 'tensorflow_text')

        double_backend = find_backend('    if not (is_sentencepiece_available() and is_tokenizers_available()):')
        self.assertEqual(double_backend, 'sentencepiece_and_tokenizers')

        double_backend_with_underscore = find_backend(
            '    if not (is_sentencepiece_available() and is_tensorflow_text_available()):')
        self.assertEqual(double_backend_with_underscore, 'sentencepiece_and_tensorflow_text')

        triple_backend = find_backend(
            '    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):')
        self.assertEqual(triple_backend, 'sentencepiece_and_tokenizers_and_vision')
    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('torch', objects)
        self.assertIn('tensorflow_text', objects)
        self.assertIn('sentencepiece_and_tokenizers', objects)
# Likewise, we can't assert on the exact content of a key
self.assertIn('''BertModel''' , objects['''torch'''] )
self.assertIn('''TFBertModel''' , objects['''tf'''] )
self.assertIn('''FlaxBertModel''' , objects['''flax'''] )
self.assertIn('''BertModel''' , objects['''torch'''] )
self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''] )
self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''] )
    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object('CONSTANT', "'torch'")
        self.assertEqual(dummy_constant, '\nCONSTANT = None\n')

        dummy_function = create_dummy_object('function', "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n")

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
"""
        dummy_class = create_dummy_object('FakeClass', "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
'''
        dummy_files = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']})
        self.assertEqual(dummy_files['torch'], expected_dummy_pytorch_file)
| 716 |
"""simple docstring"""
def solution(n: int = 600_851_475_143) -> int:
    """Returns the largest prime factor of ``n``."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('Parameter n must be int or castable to int.')
    if n <= 0:
        raise ValueError('Parameter n must be greater than or equal to one.')

    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
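# Worked check: 13195 = 5 * 7 * 13 * 29, so solution(13_195) == 29. For the
# default Project Euler input 600_851_475_143 the function returns 6857.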
if __name__ == "__main__":
print(f"""{solution() = }""")
| 397 | 0 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    """Pipeline for unconditional image generation with stochastic Karras et al. (2022) sampling."""

    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = 'pil',
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output['derivative'],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == 'pil':
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
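# A minimal usage sketch (the checkpoint path is a placeholder, not a real id):
#
#   unet = UNet2DModel.from_pretrained('path/to/karras-ve-unet')
#   pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]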
| 405 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class OnnxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ['torch', 'transformers', 'onnx']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])


class OnnxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ['torch', 'transformers', 'onnx']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])


class OnnxStableDiffusionInpaintPipelineLegacy(metaclass=DummyObject):
    _backends = ['torch', 'transformers', 'onnx']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])


class OnnxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ['torch', 'transformers', 'onnx']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])


class OnnxStableDiffusionUpscalePipeline(metaclass=DummyObject):
    _backends = ['torch', 'transformers', 'onnx']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])


class StableDiffusionOnnxPipeline(metaclass=DummyObject):
    _backends = ['torch', 'transformers', 'onnx']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])
| 405 | 1 |
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048


class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(line) for line in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length

        self.transforms = transforms

    def __len__(self):
        return len(self.data)
    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]['text'], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]['label']]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]['img'])).convert('RGB')
        image = self.transforms(image)
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row['label'])
        return label_freqs


def collate_fn(batch):
    lens = [len(row['sentence']) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row['sentence']
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row['image'] for row in batch])
    tgt_tensor = torch.stack([row['label'] for row in batch])
    img_start_token = torch.stack([row['image_start_token'] for row in batch])
    img_end_token = torch.stack([row['image_end_token'] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.4_6_7_7_7_0_4_4, 0.4_4_5_3_1_4_2_9, 0.4_0_6_6_1_0_1_7] , std=[0.1_2_2_2_1_9_9_4, 0.1_2_1_4_5_8_3_5, 0.1_4_3_8_0_4_6_9] , ),
] )
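# A minimal wiring sketch (the tokenizer choice and paths are placeholders):
#
#   transforms = get_image_transforms()
#   dataset = JsonlDataset('mmimdb/train.jsonl', tokenizer, transforms,
#                          get_mmimdb_labels(), max_seq_length=512)
#   loader = torch.utils.data.DataLoader(dataset, batch_size=8, collate_fn=collate_fn)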
| 233 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/config.json""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/config.json""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"""
),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = 'xlm-roberta'

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type='absolute',
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == 'multiple-choice':
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )
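# Quick usage sketch (runs offline; the values echo the defaults above):
#
#   config = XLMRobertaConfig()
#   config.hidden_size, config.num_attention_heads  # -> (768, 12)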
| 233 | 1 |
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """Sorts, in place and in one pass, a sequence containing only the values in ``colors``."""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)

    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f'The elements inside the sequence must contain only {colors} values'
            raise ValueError(msg)
    return sequence
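# Example (single pass, O(n) time, O(1) extra space):
#
#   dutch_national_flag_sort([2, 0, 1, 2, 0])  # -> [0, 0, 1, 2, 2]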
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by commas:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]
    print(f'{dutch_national_flag_sort(unsorted)}')
| 471 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id='CompVis/stable-diffusion-v1-4'):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = 'bf16' if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder='unet', dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2_323, -0.1_304, 0.0_813, -0.3_093, -0.0_919, -0.1_571, -0.1_125, -0.5_806]],
[17, 0.55, [-0.0_831, -0.2_443, 0.0_901, -0.0_919, 0.3_396, 0.0_103, -0.3_743, 0.0_701]],
[8, 0.89, [-0.4_863, 0.0_859, 0.0_875, -0.1_658, 0.9_199, -0.0_114, 0.4_839, 0.4_639]],
[3, 1000, [-0.5_649, 0.2_402, -0.5_518, 0.1_248, 1.1_328, -0.2_443, -0.0_325, -1.0_078]],
# fmt: on
] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id='CompVis/stable-diffusion-v1-4', fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {'params': params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1_514, 0.0_807, 0.1_624, 0.1_016, -0.1_896, 0.0_263, 0.0_677, 0.2_310]],
[17, 0.55, [0.1_164, -0.0_216, 0.0_170, 0.1_589, -0.3_120, 0.1_005, -0.0_581, -0.1_458]],
[8, 0.89, [-0.1_758, -0.0_169, 0.1_004, -0.1_411, 0.1_312, 0.1_103, -0.1_996, 0.2_139]],
[3, 1000, [0.1_214, 0.0_352, -0.0_731, -0.1_562, -0.0_994, -0.0_906, -0.2_340, -0.0_539]],
# fmt: on
] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id='stabilityai/stable-diffusion-2', fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {'params': params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
| 471 | 1 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained('google/pegasus-large')

    def get_tokenizer(self, **kwargs):
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self):
        token = '</s>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '<pad>')
        self.assertEqual(vocab_keys[1], '</s>')
        self.assertEqual(vocab_keys[-1], 'v')
        self.assertEqual(len(vocab_keys), 1_103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_103)

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            ' </s> <pad> <pad> <pad>'
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96_103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1_024
        raw_input_str = 'To ensure a smooth flow of bank resolutions.'
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ['This is going to be way too long.' * 150, 'short example']
        tgt_texts = ['not super long but more than 5 tokens', 'tiny']
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors='pt')
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors='pt')

        assert batch.input_ids.shape == (2, 1_024)
        assert batch.attention_mask.shape == (2, 1_024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
@slow
def a__ ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = {'input_ids': [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase, model_name='google/bigbird-pegasus-large-arxiv', revision='ba85d0851d708441f91440d509690f1ab6353415', )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token='[MASK]')
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv')

    def get_tokenizer(self, **kwargs):
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            ' <pad> <pad> <pad>'
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ['This is going to be way too long.' * 1000, 'short example']
        tgt_texts = ['not super long but more than 5 tokens', 'tiny']
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors='pt')
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors='pt')

        assert batch.input_ids.shape == (2, 4_096)
        assert batch.attention_mask.shape == (2, 4_096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        raw_input_str = (
            'This is an example string that is used to test the original TF implementation against the HF'
            ' implementation'
        )
        token_ids = self._large_tokenizer(raw_input_str).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
| 716 |
'''simple docstring'''
import math
import sys
def read_file_binary(file_path: str) -> str:
    """Reads the given file as bytes and returns them as one long bit string."""
    result = ''
    try:
        with open(file_path, 'rb') as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f'{dat:08b}'
            result += curr_byte
        return result
    except OSError:
        print('File not accessible')
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """Decompresses a bit string produced by the matching LZ compressor."""
    lexicon = {'0': '0', '1': '1'}
    result, curr_string = '', ''
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + '0'

        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex['0' + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + '1'
        index += 1
        curr_string = ''
    return result
def write_file_binary(file_path: str, to_write: str) -> None:
    """Packs the bit string into bytes and writes it to the given file."""
    byte_length = 8
    try:
        with open(file_path, 'wb') as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append('10000000')
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder='big'))
    except OSError:
        print('File not accessible')
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """Strips the length prefix that the compressor prepends to the bit stream."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
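# Example invocation (script name and paths are placeholders; the input must
# have been produced by the matching LZ compressor):
#
#   python lempel_ziv_decompress.py compressed.bin restored.txt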
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 238 | 0 |
"""simple docstring"""
def sum_of_proper_divisors(input_num: int) -> int:
    """Returns the sum of the proper divisors of ``input_num`` (all divisors except the number itself)."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
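# Worked check: the proper divisors of 28 are 1, 2, 4, 7 and 14, which sum to
# 28 (a perfect number), so sum_of_proper_divisors(28) == 28.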
if __name__ == "__main__":
import doctest
doctest.testmod() | 237 |
"""simple docstring"""
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict['projector.weight']
    model.projector.bias.data = downstream_dict['projector.bias']
    model.classifier.weight.data = downstream_dict['model.post_net.linear.weight']
    model.classifier.bias.data = downstream_dict['model.post_net.linear.bias']
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict['model.linear.weight']
    model.classifier.bias.data = downstream_dict['model.linear.bias']
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict['connector.weight']
    model.projector.bias.data = downstream_dict['connector.bias']
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f'model.framelevel_feature_extractor.module.{i}.kernel.weight'
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f'model.framelevel_feature_extractor.module.{i}.kernel.bias']

    model.feature_extractor.weight.data = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight']
    model.feature_extractor.bias.data = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias']
    model.classifier.weight.data = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight']
    model.classifier.bias.data = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias']
    model.objective.weight.data = downstream_dict['objective.W']
    return model
@torch.no_grad()
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) -> Tuple:
a_ : Optional[int] = torch.load(SCREAMING_SNAKE_CASE__, map_location="cpu" )
a_ : List[str] = checkpoint["Downstream"]
a_ : Union[str, Any] = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
a_ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(
SCREAMING_SNAKE_CASE__, return_attention_mask=SCREAMING_SNAKE_CASE__, do_normalize=SCREAMING_SNAKE_CASE__ )
a_ : Tuple = hf_config.architectures[0]
if arch.endswith("ForSequenceClassification" ):
a_ : int = convert_classification(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
elif arch.endswith("ForAudioFrameClassification" ):
a_ : Any = convert_diarization(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
elif arch.endswith("ForXVector" ):
a_ : Any = convert_xvector(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
else:
raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
a_ : Tuple = checkpoint["Featurizer"]["weights"]
hf_feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE__ )
hf_model.save_pretrained(SCREAMING_SNAKE_CASE__ )
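

# Example invocation (the model name and paths are illustrative placeholders):
#   python convert_s3prl_checkpoint.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_checkpoint.ckpt \
#       --model_dump_path ./converted_model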
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path) | 237 | 1 |
from __future__ import annotations

from collections import Counter
from random import random


class MarkovChainGraphUndirectedUnweighted:
    """Undirected, unweighted graph for running the Markov chain algorithm."""

    def __init__(self) -> None:
        self.connections: dict[str, dict[str, float]] = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0.0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    """Run a random walk of ``steps`` transitions from ``start`` and count node visits."""
    graph = MarkovChainGraphUndirectedUnweighted()

    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)

    visited = Counter(graph.get_nodes())
    node = start

    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1

    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
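    # Illustrative demo (the transition probabilities are made up): a biased
    # two-state walk should visit "a" more often than "b" over many steps.
    example_transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
    print(get_transitions("a", example_transitions, 1_000))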
| 713 |
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class LevitImageProcessor(BaseImageProcessor):
    # Note: the class name was lost in extraction; "LevitImageProcessor" is
    # inferred from the LeViT-style (256 / 224) shortest-edge rescaling below.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
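

# A short usage sketch (the image path is a placeholder): the processor
# resizes, center-crops, rescales and normalizes a PIL image into a batch.
#   from PIL import Image
#   processor = LevitImageProcessor()
#   inputs = processor(images=Image.open("cat.jpg"), return_tensors="pt")
#   inputs["pixel_values"].shape  # torch.Size([1, 3, 224, 224])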
| 650 | 0 |
"""simple docstring"""
import re
def snake_case__ ( _lowerCamelCase ) ->bool:
"""simple docstring"""
__lowercase : str = re.compile(
R"^(?:0|94|\+94|0{2}94)" R"7(0|1|2|4|5|6|7|8)" R"(-| |)" R"\d{7}$" )
return bool(re.search(_lowerCamelCase, _lowerCamelCase ) )
if __name__ == "__main__":
__A : str = '0094702343221'
print(is_sri_lankan_phone_number(phone))
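    # A few more illustrative checks (the numbers are synthetic):
    for candidate in ("+94773283048", "0718382399", "075469322"):
        print(candidate, is_sri_lankan_phone_number(candidate))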
| 575 |
"""simple docstring"""
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
def __init__( self : Any , lowercase__ : Any="" , lowercase__ : List[str]="train" ):
assert os.path.isdir(lowercase__ )
__lowercase : Optional[Any] = []
__lowercase : Optional[int] = os.listdir(lowercase__ )
for story_filename in story_filenames_list:
if "summary" in story_filename:
continue
__lowercase : Optional[Any] = os.path.join(lowercase__ , lowercase__ )
if not os.path.isfile(lowercase__ ):
continue
self.documents.append(lowercase__ )
def __len__( self : Union[str, Any] ):
return len(self.documents )
def __getitem__( self : int , lowercase__ : int ):
__lowercase : List[str] = self.documents[idx]
__lowercase : List[str] = document_path.split("/" )[-1]
with open(lowercase__ , encoding="utf-8" ) as source:
__lowercase : Optional[int] = source.read()
__lowercase ,__lowercase : Union[str, Any] = process_story(lowercase__ )
return document_name, story_lines, summary_lines
def snake_case__ ( _lowerCamelCase ) ->str:
"""simple docstring"""
__lowercase : int = list(filter(lambda _lowerCamelCase : len(_lowerCamelCase ) != 0, [line.strip() for line in raw_story.split("\n" )] ) )
# for some unknown reason some lines miss a period, add it
__lowercase : int = [_add_missing_period(_lowerCamelCase ) for line in nonempty_lines]
# gather article lines
__lowercase : str = []
__lowercase : Union[str, Any] = deque(_lowerCamelCase )
while True:
try:
__lowercase : Any = lines.popleft()
if element.startswith("@highlight" ):
break
story_lines.append(_lowerCamelCase )
except IndexError:
# if "@highlight" is absent from the file we pop
# all elements until there is None, raising an exception.
return story_lines, []
# gather summary lines
__lowercase : str = list(filter(lambda _lowerCamelCase : not t.startswith("@highlight" ), _lowerCamelCase ) )
return story_lines, summary_lines
def snake_case__ ( _lowerCamelCase ) ->Optional[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = [".", "!", "?", "...", "'", "`", "\"", "\u2019", "\u2019", ")"]
if line.startswith("@highlight" ):
return line
if line[-1] in END_TOKENS:
return line
return line + "."
def snake_case__ ( _lowerCamelCase, _lowerCamelCase, _lowerCamelCase ) ->int:
"""simple docstring"""
if len(_lowerCamelCase ) > block_size:
return sequence[:block_size]
else:
sequence.extend([pad_token_id] * (block_size - len(_lowerCamelCase )) )
return sequence
def snake_case__ ( _lowerCamelCase, _lowerCamelCase ) ->Union[str, Any]:
"""simple docstring"""
__lowercase : Tuple = torch.ones_like(_lowerCamelCase )
__lowercase : Any = sequence == pad_token_id
__lowercase : List[str] = 0
return mask
def snake_case__ ( _lowerCamelCase, _lowerCamelCase, _lowerCamelCase ) ->List[str]:
"""simple docstring"""
__lowercase : Optional[int] = [tokenizer.encode(_lowerCamelCase ) for line in story_lines]
__lowercase : Optional[int] = [token for sentence in story_lines_token_ids for token in sentence]
__lowercase : Any = [tokenizer.encode(_lowerCamelCase ) for line in summary_lines]
__lowercase : Tuple = [token for sentence in summary_lines_token_ids for token in sentence]
return story_token_ids, summary_token_ids
def snake_case__ ( _lowerCamelCase, _lowerCamelCase ) ->Optional[int]:
"""simple docstring"""
__lowercase : List[str] = []
for sequence in batch:
__lowercase : Dict = -1
__lowercase : str = []
for s in sequence:
if s == separator_token_id:
sentence_num += 1
embeddings.append(sentence_num % 2 )
batch_embeddings.append(_lowerCamelCase )
return torch.tensor(_lowerCamelCase )
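

# A tiny illustrative run (the story text is synthetic): everything before the
# first "@highlight" marker is the article, the rest is the summary.
if __name__ == "__main__":
    raw = "First sentence.\nSecond sentence\n@highlight\nthe summary"
    print(process_story(raw))
    # (['First sentence.', 'Second sentence.'], ['the summary.'])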
| 575 | 1 |
"""Collect and aggregate test errors from a GitHub Actions workflow run."""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter

import requests


def get_job_links(workflow_run_id, token=None):
    """Extract job names and their links from a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def get_artifacts_links(worflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact, following the redirect to the download URL."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)


def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result


def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files found in `artifact_dir`."""
    errors = []

    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors


def reduce_by_error(logs, error_filter=None):
    """Count the occurrences of each error."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def get_model(test):
    """Get the model name from a test method path."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None
    return test


def reduce_by_model(logs, error_filter=None):
    """Count the occurrences of each error, per model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def make_github_table(reduced_by_error):
    """Render the per-error counts as a GitHub-flavoured Markdown table."""
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    """Render the per-model counts as a GitHub-flavoured Markdown table."""
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
a = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
a = get_job_links(args.workflow_run_id, token=args.token)
a = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
a = k.find(' / ')
a = k[index + len(' / ') :]
a = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
a = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
a = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
a = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
a = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
a = reduce_by_error(errors)
a = reduce_by_model(errors)
a = make_github_table(reduced_by_error)
a = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
| 721 |
"""Time Series Transformer model configuration."""
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}


class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
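

# A short usage sketch: the derived `feature_size` equals
# input_size * len(lags_sequence) plus the extra static/time features.
#   config = TimeSeriesTransformerConfig(prediction_length=24)
#   config.context_length   # 24 (defaults to prediction_length)
#   config.feature_size     # 1 * 7 + (0 + 0 + 0 + 0 + 2) = 9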
| 347 | 0 |
def match_pattern(input_string: str, pattern: str) -> bool:
    """
    Regex-style matching with support for "." (any single character) and
    "*" (zero or more of the preceding element), via dynamic programming.
    """
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]

            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])
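

# A few hand-checked cases: "*" matches zero or more of the preceding element
# and "." matches any single character.
assert match_pattern("aab", "c*a*b")       # c* matches "", a* matches "aa"
assert match_pattern("dsce", "d.c*e")      # "." matches "s"
assert not match_pattern("abc", "a.c*d")   # no trailing "d" in the string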
if __name__ == "__main__":
import doctest
doctest.testmod()
    # inputting the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'''{input_string} matches the given pattern {pattern}''')
else:
print(f'''{input_string} does not match with the given pattern {pattern}''')
| 74 |
"""simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
lowerCamelCase = re.compile(r"""\b(a|an|the)\b""", re.UNICODE)
lowerCamelCase = None
def a__ ( ):
UpperCAmelCase_ = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." )
parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." )
parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." )
parser.add_argument(
"--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." )
parser.add_argument(
"--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." )
parser.add_argument(
"--na-prob-thresh" , "-t" , type=lowerCAmelCase__ , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , )
parser.add_argument(
"--out-image-dir" , "-p" , metavar="out_images" , default=lowerCAmelCase__ , help="Save precision-recall curves to directory." )
parser.add_argument("--verbose" , "-v" , action="store_true" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
UpperCAmelCase_ = bool(qa["answers"]["text"] )
return qid_to_has_ans
def a__ ( lowerCAmelCase__ ):
def remove_articles(lowerCAmelCase__ ):
return ARTICLES_REGEX.sub(" " , lowerCAmelCase__ )
def white_space_fix(lowerCAmelCase__ ):
return " ".join(text.split() )
def remove_punc(lowerCAmelCase__ ):
UpperCAmelCase_ = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(lowerCAmelCase__ ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowerCAmelCase__ ) ) ) )
def a__ ( lowerCAmelCase__ ):
if not s:
return []
return normalize_answer(lowerCAmelCase__ ).split()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
return int(normalize_answer(lowerCAmelCase__ ) == normalize_answer(lowerCAmelCase__ ) )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = get_tokens(lowerCAmelCase__ )
UpperCAmelCase_ = get_tokens(lowerCAmelCase__ )
UpperCAmelCase_ = collections.Counter(lowerCAmelCase__ ) & collections.Counter(lowerCAmelCase__ )
UpperCAmelCase_ = sum(common.values() )
if len(lowerCAmelCase__ ) == 0 or len(lowerCAmelCase__ ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
UpperCAmelCase_ = 1.0 * num_same / len(lowerCAmelCase__ )
UpperCAmelCase_ = 1.0 * num_same / len(lowerCAmelCase__ )
UpperCAmelCase_ = (2 * precision * recall) / (precision + recall)
return fa
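

# A hand-checked example: gold "cat sat down" vs prediction "cat sat" share
# two tokens, giving precision 1.0, recall 2/3 and F1 = 0.8.
assert round(compute_f1("cat sat down", "cat sat"), 6) == 0.8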
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh


def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
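

# Example invocation (the file names are placeholders):
#   python evaluate_v2.py dev-v2.0.json predictions.json \
#       -o eval.json -n na_prob.json -p out_images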
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("""Agg""")
import matplotlib.pyplot as plt
main()
| 82 | 0 |
"""simple docstring"""
from __future__ import annotations
def _lowercase ( __snake_case ,__snake_case ) -> set[str]:
__lowerCAmelCase , __lowerCAmelCase : Dict = set(__snake_case ), [start]
while stack:
__lowerCAmelCase : Optional[int] = stack.pop()
explored.add(__snake_case )
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v] ):
if adj not in explored:
stack.append(__snake_case )
return explored
G = {
'A': ['B', 'C', 'D'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F'],
'D': ['B', 'D'],
'E': ['B', 'F'],
'F': ['C', 'E', 'G'],
'G': ['F'],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(depth_first_search(G, "A"))
| 615 |
"""simple docstring"""
import logging
from transformers.configuration_utils import PretrainedConfig
__snake_case : Tuple = logging.getLogger(__name__)
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'masked_bert'
def __init__( self: str , _SCREAMING_SNAKE_CASE: Tuple=3_0522 , _SCREAMING_SNAKE_CASE: Optional[int]=768 , _SCREAMING_SNAKE_CASE: List[str]=12 , _SCREAMING_SNAKE_CASE: Any=12 , _SCREAMING_SNAKE_CASE: Optional[int]=3072 , _SCREAMING_SNAKE_CASE: List[str]="gelu" , _SCREAMING_SNAKE_CASE: List[str]=0.1 , _SCREAMING_SNAKE_CASE: str=0.1 , _SCREAMING_SNAKE_CASE: Dict=512 , _SCREAMING_SNAKE_CASE: List[Any]=2 , _SCREAMING_SNAKE_CASE: Tuple=0.02 , _SCREAMING_SNAKE_CASE: int=1e-12 , _SCREAMING_SNAKE_CASE: int=0 , _SCREAMING_SNAKE_CASE: Tuple="topK" , _SCREAMING_SNAKE_CASE: Any="constant" , _SCREAMING_SNAKE_CASE: int=0.0 , **_SCREAMING_SNAKE_CASE: Any , ) -> List[str]:
"""simple docstring"""
super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = vocab_size
__lowerCAmelCase : List[str] = hidden_size
__lowerCAmelCase : Any = num_hidden_layers
__lowerCAmelCase : Dict = num_attention_heads
__lowerCAmelCase : Union[str, Any] = hidden_act
__lowerCAmelCase : Optional[Any] = intermediate_size
__lowerCAmelCase : int = hidden_dropout_prob
__lowerCAmelCase : List[Any] = attention_probs_dropout_prob
__lowerCAmelCase : int = max_position_embeddings
__lowerCAmelCase : str = type_vocab_size
__lowerCAmelCase : Any = initializer_range
__lowerCAmelCase : Optional[int] = layer_norm_eps
__lowerCAmelCase : Tuple = pruning_method
__lowerCAmelCase : Union[str, Any] = mask_init
__lowerCAmelCase : str = mask_scale | 615 | 1 |
from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-char bit string from big- to little-endian byte order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Format a non-negative integer as a little-endian hex string."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Expand the message to a bit string and pad it to a multiple of 512 bits."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the bit string into 512-bit blocks of 16 little-endian 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise NOT on a 32-bit unsigned integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate the bits of a 32-bit unsigned integer left by `shift` places."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Return the 32-char little-endian hex MD5 digest of the message."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)     # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)     # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
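    # Quick sanity check against hashlib (illustrative): both should produce
    # the canonical MD5 digest of the empty byte string.
    import hashlib
    assert md5_me(b"") == hashlib.md5(b"").hexdigest().encode("utf-8")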
| 1 |
"""Horizontal/vertical flip augmentation for images with YOLO-format labels."""
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2

LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    """
    Get the image and annotation lists from the input directories, flip them,
    and save the new images and annotations in the output directory.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Read all label files in `label_dir` and pair them with their images."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    """Flip each image and mirror its bounding-box centers accordingly."""
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
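

# Worked example (synthetic YOLO-format box: [class, x_center, y_center, w, h],
# all normalized to [0, 1]): a horizontal flip maps x_center 0.25 -> 0.75 while
# the class id, y_center and the box size stay unchanged.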
def random_chars(number_char: int = 32) -> str:
    """Generate a random string of lowercase letters and digits."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("DONE ✅")
| 517 | 0 |
"""Binarize the mask scores of a fine-pruned model and save the pruned weights."""
import argparse
import os
import shutil

import torch

from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer


def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                # the final flag enables the sigmoid on the scores
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
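

# Example invocation (the path is a placeholder):
#   python bertarize.py --pruning_method topK --threshold 0.10 \
#       --model_name_or_path ./serialization_dir/fine_pruned_model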
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
__SCREAMING_SNAKE_CASE =parser.parse_args()
main(args)
| 706 |
import unittest

from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax

from .test_modeling_common_flax import FlaxModelTesterMixin


if is_flax_available():
    import jax


@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 89 | 0 |
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetEmbeddings(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class RegNetShortCut(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input):
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class RegNetSELayer(nn.Module):
    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
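# Editor's note: a hedged illustration of the squeeze-and-excitation gating above —
# global-average-pool to per-channel statistics, a 1x1-conv bottleneck, then a
# sigmoid gate multiplied back onto the feature map. Shapes are illustrative only.
#
#   import torch
#   x = torch.randn(2, 64, 7, 7)                      # (batch, channels, h, w)
#   se = RegNetSELayer(in_channels=64, reduced_channels=16)
#   assert se(x).shape == x.shape                     # same shape, re-weighted channels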
class RegNetXLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetYLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetStage(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state


class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state, output_hidden_states=False, return_dict=True):
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class RegNetPreTrainedModel(PreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value


REGNET_START_DOCSTRING = r'\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'

REGNET_INPUTS_DOCSTRING = r'\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConvNextImageProcessor.__call__`] for details.\n\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n'


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
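# Editor's note: a hedged summary of the `problem_type` dispatch in the forward
# above — `num_labels == 1` is treated as regression (MSE); integer labels with
# `num_labels > 1` select single-label classification (cross-entropy); anything
# else falls through to multi-label (BCE-with-logits). Tensors are illustrative.
#
#   import torch
#   logits = torch.randn(4, 3)
#   labels = torch.tensor([0, 2, 1, 1])               # integer dtype -> cross-entropy
#   loss = torch.nn.CrossEntropyLoss()(logits.view(-1, 3), labels.view(-1))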
| 212 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
ADAPTER_CONFIG_NAME = "adapter_config.json"
ADAPTER_WEIGHTS_NAME = "adapter_model.bin"
ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"

SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility

MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
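# Editor's note: a hedged usage sketch — an example script would typically call this
# once at import time; the version string below is illustrative only.
#
#   check_min_version("4.21.0")  # raises ImportError if the installed transformers is older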
| 212 | 1 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
("bert.bert", "visual_bert"),
("bert.cls", "cls"),
("bert.classifier", "cls"),
("token_type_embeddings_visual", "visual_token_type_embeddings"),
("position_embeddings_visual", "visual_position_embeddings"),
("projection", "visual_projection"),
]
ACCEPTABLE_CHECKPOINTS = [
"nlvr2_coco_pre_trained.th",
"nlvr2_fine_tuned.th",
"nlvr2_pre_trained.th",
"vcr_coco_pre_train.th",
"vcr_fine_tune.th",
"vcr_pre_train.th",
"vqa_coco_pre_trained.th",
"vqa_fine_tuned.th",
"vqa_pre_trained.th",
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
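# Editor's note: a quick runnable illustration (added) of how the prefix pairs
# above rewrite checkpoint keys; the sample key is hypothetical.
_example_key = "bert.bert.encoder.layer.0.output.dense.weight"
for _old, _new in rename_keys_prefix:
    _example_key = _example_key.replace(_old, _new)
assert _example_key == "visual_bert.encoder.layer.0.output.dense.weight"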
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """
    Copy/paste/tweak model's weights to our VisualBERT structure.
    """
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__A : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
__A : Union[str, Any] = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path) | 707 |
'''simple docstring'''
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        # pick an index j in [0, i] and swap — this is the unbiased Fisher-Yates step;
        # the original snippet swapped two fully random indices, which is biased
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data
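# Editor's note: a quick runnable sanity check (added example) — shuffling must
# preserve the multiset of elements:
assert sorted(fisher_yates_shuffle(list(range(10)))) == list(range(10))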
if __name__ == "__main__":
__A : Optional[int] = [0, 1, 2, 3, 4, 5, 6, 7]
__A : Optional[int] = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings)) | 267 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[9:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
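# Editor's note (added): the 256/224 factor above reproduces the standard
# resize-then-center-crop evaluation protocol. Worked example: for
# image_size=224 the shorter side is resized to int((256 / 224) * 224) == 256,
# then center-cropped back to 224x224.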
if __name__ == "__main__":
UpperCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--deit_name""",
default="""vit_deit_base_distilled_patch16_224""",
type=str,
help="""Name of the DeiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
UpperCamelCase : Optional[int] = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 37 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common, init_noise_sigma, timesteps):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)


@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self) -> bool:
        return True

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps
        )
    def scale_model_input(
        self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
    ) -> jnp.ndarray:
        return sample

    def set_timesteps(
        self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
        )

    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key: Optional[jax.random.KeyArray] = None,
        return_dict: bool = True,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                "or `v_prediction` for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)

    def add_noise(
        self,
        state: DDPMSchedulerState,
        original_samples: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(
        self,
        state: DDPMSchedulerState,
        sample: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)
    def __len__(self) -> int:
        return self.config.num_train_timesteps
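# Editor's note: a hedged usage sketch of the scheduler above; the noise-prediction
# model (`unet_apply`) and the initial `sample` are illustrative, not from this file.
#
#   scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
#   state = scheduler.create_state()
#   state = scheduler.set_timesteps(state, num_inference_steps=50)
#   for t in state.timesteps:
#       model_output = unet_apply(sample, t)  # hypothetical epsilon-predictor
#       sample, state = scheduler.step(state, model_output, t, sample, return_dict=False)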
| 538 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_deberta""": ["""DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DebertaConfig""", """DebertaOnnxConfig"""],
"""tokenization_deberta""": ["""DebertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
"""DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DebertaForMaskedLM""",
"""DebertaForQuestionAnswering""",
"""DebertaForSequenceClassification""",
"""DebertaForTokenClassification""",
"""DebertaModel""",
"""DebertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
"""TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDebertaForMaskedLM""",
"""TFDebertaForQuestionAnswering""",
"""TFDebertaForSequenceClassification""",
"""TFDebertaForTokenClassification""",
"""TFDebertaModel""",
"""TFDebertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
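# Editor's note: a minimal, hedged sketch of the lazy-import pattern used above
# (illustrative only; the real `_LazyModule` lives in transformers.utils):
#
#   import importlib, types
#   class _Lazy(types.ModuleType):
#       def __init__(self, name, structure):
#           super().__init__(name)
#           # invert {submodule: [symbols]} into {symbol: submodule}
#           self._symbol_to_module = {v: k for k, vs in structure.items() for v in vs}
#       def __getattr__(self, attr):
#           # the heavy submodule is imported only on first attribute access
#           module = importlib.import_module("." + self._symbol_to_module[attr], self.__name__)
#           return getattr(module, attr)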
| 720 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/xmod-base""": """https://huggingface.co/facebook/xmod-base/resolve/main/config.json""",
"""facebook/xmod-large-prenorm""": """https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json""",
"""facebook/xmod-base-13-125k""": """https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-125k""": """https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-195k""": """https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json""",
"""facebook/xmod-base-60-125k""": """https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json""",
"""facebook/xmod-base-60-265k""": """https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json""",
"""facebook/xmod-base-75-125k""": """https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json""",
"""facebook/xmod-base-75-269k""": """https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json""",
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 270 | 0 |
"""simple docstring"""
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
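# Editor's note: quick runnable examples (added for illustration):
assert energy_conversion("joule", "kilojoule", 1_000) == 1.0
assert energy_conversion("kilowatthour", "joule", 1.0) == 3_600_000.0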
if __name__ == "__main__":
import doctest
doctest.testmod() | 52 |
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image, question=None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # also accepts dicts, lists of dicts, generators and datasets
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 260 | 0 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 707 |
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length
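# Editor's note: a quick runnable sanity check (added example) — for the straight
# line f(x) = x, every linear segment is exact, so the result equals 3 * sqrt(2):
assert abs(line_length(lambda x: x, 0, 3, 10) - 3 * math.sqrt(2)) < 1e-9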
if __name__ == "__main__":
    def f(x: float) -> float:
        return math.sin(10 * x)
print("""f(x) = sin(10 * x)""")
print("""The length of the curve from x = -10 to x = 10 is:""")
    i = 10
while i <= 100_000:
print(f'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
| 45 | 0 |
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
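# Editor's note: quick runnable check (added example): 2**15 = 32768 and
# 3 + 2 + 7 + 6 + 8 == 26.
assert solution(15) == 26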
if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 57 |
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
a__ : int ='''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
a__ : str ='''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
a__ : Any ='''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
| 399 | 0 |
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def floyd(n: int) -> None:
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


# Function to print lower half of diamond (pyramid)
def reverse_floyd(n: int) -> None:
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


# Function to print the complete diamond, or a friendly message otherwise
def pretty_print(n: int) -> None:
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
if __name__ == "__main__":
print(r'''| /\ | |- | |- |--| |\ /| |-''')
print(r'''|/ \| |- |_ |_ |__| | \/ | |_''')
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))

    print("Good Bye...")
| 357 |
'''simple docstring'''
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )

        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)

    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}
        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels}
        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass


@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
# fmt: off
        input_ids = tf.convert_to_tensor([[3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0]] , dtype=tf.int32 )  # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0,3_3,1,1_8_5_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_8,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,0]  # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
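
# Illustrative only (added, not part of the original file): one way to run just
# this test module, assuming the usual transformers repo layout:
#   RUN_SLOW=1 python -m pytest tests/models/transfo_xl/test_modeling_tf_transfo_xl.py -q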
| 357 | 1 |
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class CursorInfo(ctypes.Structure):
    # _fields_ is the attribute ctypes reads to lay out the C struct
    _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def hide_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()
def show_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()
@contextmanager
def hide():
    """Context manager that hides the terminal cursor and restores it on exit."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
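

# Illustrative usage (added, not part of the original module): the cursor stays
# hidden while the block runs and is restored even if an exception is raised.
if __name__ == "__main__":
    import time

    with hide():
        time.sleep(1.0)  # pretend to do some work with the cursor hidden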
| 560 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
logger = logging.get_logger(__name__)
class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
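

# Illustrative only (added, not part of the original file): constructing the
# deprecated class emits the FutureWarning above while otherwise behaving like
# a plain GLPNImageProcessor.
if __name__ == "__main__":
    import warnings as _warnings

    with _warnings.catch_warnings(record=True) as caught:
        _warnings.simplefilter("always")
        GLPNFeatureExtractor()
        assert any("GLPNFeatureExtractor is deprecated" in str(w.message) for w in caught)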
| 560 | 1 |
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """Gaussian Error Linear Unit — the original, erf-based formulation."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """Smoother tanh approximation of GELU (the variant used e.g. by GPT-2)."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """GELU clipped to the range [-10, 10]."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: split the tensor in two along `axis` and gate one half with the sigmoid of the other."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new

ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
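

# Illustrative only (added, not part of the original module): look up an
# activation by name and apply it to a small tensor.
if __name__ == "__main__":
    sample = tf.constant([-1.0, 0.0, 1.0])
    activation = get_tf_activation("gelu")
    print(activation(sample).numpy())  # approximately [-0.159, 0.0, 0.841]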
| 703 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."


@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that dynamically pads a batch of multiple-choice inputs: the
    (batch, num_choices) grid is flattened for `tokenizer.pad` and reshaped back
    afterwards.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
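

# Shape walkthrough for the collator above (illustrative numbers, not from the
# source): with batch_size=8 and num_choices=4, `flattened_features` holds
# 8 * 4 = 32 rows; `tokenizer.pad` pads them to a common length L, and the
# view() call reshapes `batch["input_ids"]` back to (8, 4, L) so the model can
# score all four candidate endings of each example in one forward pass.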
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
# Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]
        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))
        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )
    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 626 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    """
    Wraps a BridgeTower image processor and a Roberta tokenizer into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text=None, add_special_tokens=True, padding=False, truncation=None,
                 max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None,
                 return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False,
                 return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None,
                 **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
            max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping, return_length=return_length,
            verbose=verbose, return_tensors=return_tensors, **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 93 |
"""simple docstring"""
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"
class KarrasDiffusionSchedulers(Enum):
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    EulerAncestralDiscreteScheduler = 6
    DPMSolverMultistepScheduler = 7
    HeunDiscreteScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14
@dataclass
class SchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
class SchedulerMixin:
    """Mixin containing common scheduler functionality: config loading/saving and compatibility lookup."""

    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
| 555 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30,
                 max_resolution=400, do_resize=True, size=None, do_thumbnail=True, do_align_axis=False,
                 do_pad=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 162 |
'''simple docstring'''
import os
def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product

    return largest


def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))
    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)
if __name__ == "__main__":
print(solution())
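
# Illustrative check (added; a made-up 4x4 grid, not the Project Euler input):
# the best run of four adjacent numbers below is the bottom row,
# 13 * 14 * 15 * 16 = 43680.
#   largest_product([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]) == 43680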
| 162 | 1 |
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """Return the largest product a*b*c over Pythagorean triplets with a + b + c == n."""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
print(f'''{solution() = }''')
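
# Illustrative check (added): solution(12) == 60, since 3 + 4 + 5 == 12 and
# 3**2 + 4**2 == 5**2, giving the product 3 * 4 * 5 = 60.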
| 430 |
'''simple docstring'''
from __future__ import annotations
def shear_stress(
    stress: float,
    tangential_force: float,
    area: float,
) -> tuple[str, float]:
    """
    Given any two of shear stress, tangential force and cross-sectional area
    (pass 0 for the unknown one), compute the third from stress = force / area.
    """
    if (stress, tangential_force, area).count(0) != 1:
        raise ValueError('You cannot supply more or less than 2 values')
    elif stress < 0:
        raise ValueError('Stress cannot be negative')
    elif tangential_force < 0:
        raise ValueError('Tangential Force cannot be negative')
    elif area < 0:
        raise ValueError('Area cannot be negative')
    elif stress == 0:
        return (
            'stress',
            tangential_force / area,
        )
    elif tangential_force == 0:
        return (
            'tangential_force',
            stress * area,
        )
    else:
        return (
            'area',
            tangential_force / stress,
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
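
# Illustrative calls (added; they assume the reconstructed signature above):
#   shear_stress(stress=25, tangential_force=100, area=0)    -> ('area', 4.0)
#   shear_stress(stress=0, tangential_force=1600, area=200)  -> ('stress', 8.0)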
| 430 | 1 |
'''simple docstring'''
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '''
import os
'''
IMPORT_IN_FUNCTION = '''
def foo():
import os
return False
'''
DEEPLY_NESTED_IMPORT = '''
def foo():
def bar():
if True:
import os
return False
return bar()
'''
TOP_LEVEL_TRY_IMPORT = '''
import os
try:
import bar
except ImportError:
raise ValueError()
'''
TRY_IMPORT_IN_FUNCTION = '''
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
'''
MULTIPLE_EXCEPTS_IMPORT = '''
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
'''
EXCEPT_AS_IMPORT = '''
import os
try:
import bar
except ImportError as e:
raise ValueError()
'''
GENERIC_EXCEPT_IMPORT = '''
import os
try:
import bar
except:
raise ValueError()
'''
MULTILINE_TRY_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
'''
MULTILINE_BOTH_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
'''
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
| 338 |
'''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    """Mark a handler function with a single key code."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator
def mark_multiple(*keys):
    """Mark a handler function with several key codes at once."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator
class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Find and call the handler registered for the pressed key, if any."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None
def register(cls):
    """Attach the KeyHandler machinery to a class (original function name assumed)."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
| 338 | 1 |
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    res = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, res).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
| 546 |
'''simple docstring'''
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of a number, rounded to digit_amount places when positive."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 546 | 1 |
'''simple docstring'''
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class HfLoggingTestCase(unittest.TestCase):  # class name assumed; not recoverable from the obfuscated source
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()

        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")
def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
| 156 |
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __lowerCAmelCase ( __a ):
@slow
@require_torch
def snake_case_ (self ):
_UpperCAmelCase : Dict = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
_UpperCAmelCase : Tuple = BertTokenizer.from_pretrained("""bert-base-uncased""" )
_UpperCAmelCase : List[str] = bertabert.config.encoder.vocab_size
_UpperCAmelCase : List[str] = tokenizer.sep_token_id
_UpperCAmelCase : List[Any] = tokenizer.cls_token_id
_UpperCAmelCase : List[str] = 1_2_8
_UpperCAmelCase : int = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
_UpperCAmelCase : Tuple = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
_UpperCAmelCase : str = train_dataset.select(range(3_2 ) )
_UpperCAmelCase : str = val_dataset.select(range(1_6 ) )
_UpperCAmelCase : Any = 4
def _map_to_encoder_decoder_inputs(lowerCAmelCase__ ):
# Tokenizer will automatically set [BOS] <text> [EOS]
_UpperCAmelCase : List[str] = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=lowerCAmelCase__ , max_length=5_1_2 )
_UpperCAmelCase : Union[str, Any] = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=lowerCAmelCase__ , max_length=1_2_8 )
_UpperCAmelCase : List[Any] = inputs.input_ids
_UpperCAmelCase : int = inputs.attention_mask
_UpperCAmelCase : Union[str, Any] = outputs.input_ids
_UpperCAmelCase : List[Any] = outputs.input_ids.copy()
_UpperCAmelCase : str = [
[-1_0_0 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
_UpperCAmelCase : Tuple = outputs.attention_mask
assert all(len(lowerCAmelCase__ ) == 5_1_2 for x in inputs.input_ids )
assert all(len(lowerCAmelCase__ ) == 1_2_8 for x in outputs.input_ids )
return batch
def _compute_metrics(lowerCAmelCase__ ):
_UpperCAmelCase : Optional[int] = pred.label_ids
_UpperCAmelCase : Optional[int] = pred.predictions
# all unnecessary tokens are removed
_UpperCAmelCase : Optional[Any] = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
_UpperCAmelCase : str = sum([int(pred_str[i] == label_str[i] ) for i in range(len(lowerCAmelCase__ ) )] ) / len(lowerCAmelCase__ )
return {"accuracy": accuracy}
# map train dataset
_UpperCAmelCase : int = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , remove_columns=["""article""", """highlights"""] , )
train_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
# same for validation dataset
_UpperCAmelCase : Tuple = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , remove_columns=["""article""", """highlights"""] , )
val_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
_UpperCAmelCase : Any = self.get_auto_remove_tmp_dir()
_UpperCAmelCase : int = Seq2SeqTrainingArguments(
output_dir=lowerCAmelCase__ , per_device_train_batch_size=lowerCAmelCase__ , per_device_eval_batch_size=lowerCAmelCase__ , predict_with_generate=lowerCAmelCase__ , evaluation_strategy="""steps""" , do_train=lowerCAmelCase__ , do_eval=lowerCAmelCase__ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
_UpperCAmelCase : Any = Seq2SeqTrainer(
model=lowerCAmelCase__ , args=lowerCAmelCase__ , compute_metrics=_compute_metrics , train_dataset=lowerCAmelCase__ , eval_dataset=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , )
# start training
trainer.train()
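# A possible follow-up (sketch, not in the original test): Trainer.evaluate()
# reports the custom metric under an "eval_" prefix.
# metrics = trainer.evaluate()
# assert "eval_accuracy" in metrics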
| 156 | 1 |
'''simple docstring'''
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def lowercase ( __magic_name__ , __magic_name__="shi-labs/oneformer_demo" ):
'''simple docstring'''
with open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type="dataset" ) , "r" ) as f:
UpperCAmelCase : str = json.load(f )
UpperCAmelCase : Optional[Any] = {}
UpperCAmelCase : List[str] = []
UpperCAmelCase : List[Any] = []
for key, info in class_info.items():
UpperCAmelCase : int = info["name"]
class_names.append(info["name"] )
if info["isthing"]:
thing_ids.append(int(key ) )
UpperCAmelCase : List[Any] = thing_ids
UpperCAmelCase : Tuple = class_names
return metadata
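# Sketch of the intended metadata layout (reconstructed from context; the
# obfuscated assignments above originally populated it), illustrative values:
# {"0": "wall", ..., "thing_ids": [7, 10, ...], "class_names": ["wall", ...]}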
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , snake_case , snake_case=7 , snake_case=3 , snake_case=3_0 , snake_case=4_0_0 , snake_case=None , snake_case=True , snake_case=True , snake_case=[0.5, 0.5, 0.5] , snake_case=[0.5, 0.5, 0.5] , snake_case=1_0 , snake_case=False , snake_case=2_5_5 , snake_case="shi-labs/oneformer_demo" , snake_case="ade20k_panoptic.json" , snake_case=1_0 , ):
'''simple docstring'''
UpperCAmelCase : Tuple = parent
UpperCAmelCase : str = batch_size
UpperCAmelCase : Union[str, Any] = num_channels
UpperCAmelCase : Union[str, Any] = min_resolution
UpperCAmelCase : Union[str, Any] = max_resolution
UpperCAmelCase : List[str] = do_resize
UpperCAmelCase : Any = {"shortest_edge": 3_2, "longest_edge": 1_3_3_3} if size is None else size
UpperCAmelCase : Dict = do_normalize
UpperCAmelCase : List[Any] = image_mean
UpperCAmelCase : str = image_std
UpperCAmelCase : str = class_info_file
UpperCAmelCase : Dict = prepare_metadata(snake_case , snake_case )
UpperCAmelCase : Optional[Any] = num_text
UpperCAmelCase : Union[str, Any] = repo_path
# for the post_process_functions
UpperCAmelCase : Union[str, Any] = 2
UpperCAmelCase : int = 1_0
UpperCAmelCase : int = 1_0
UpperCAmelCase : int = 3
UpperCAmelCase : Dict = 4
UpperCAmelCase : Any = num_labels
UpperCAmelCase : Union[str, Any] = do_reduce_labels
UpperCAmelCase : Tuple = ignore_index
def A_ ( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def A_ ( self , snake_case , snake_case=False ):
'''simple docstring'''
if not batched:
UpperCAmelCase : Union[str, Any] = image_inputs[0]
if isinstance(snake_case , Image.Image ):
UpperCAmelCase , UpperCAmelCase : Dict = image.size
else:
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = image.shape[1], image.shape[2]
if w < h:
UpperCAmelCase : Dict = int(self.size["shortest_edge"] * h / w )
UpperCAmelCase : Dict = self.size["shortest_edge"]
elif w > h:
UpperCAmelCase : str = self.size["shortest_edge"]
UpperCAmelCase : str = int(self.size["shortest_edge"] * w / h )
else:
UpperCAmelCase : List[Any] = self.size["shortest_edge"]
UpperCAmelCase : Union[str, Any] = self.size["shortest_edge"]
else:
UpperCAmelCase : Dict = []
for image in image_inputs:
UpperCAmelCase , UpperCAmelCase : List[str] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCAmelCase : str = max(snake_case , key=lambda snake_case : item[0] )[0]
UpperCAmelCase : int = max(snake_case , key=lambda snake_case : item[1] )[1]
return expected_height, expected_width
def A_ ( self ):
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class UpperCamelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
SCREAMING_SNAKE_CASE__ : int = image_processing_class
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : List[Any] = OneFormerImageProcessorTester(self )
@property
def A_ ( self ):
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case , "image_mean" ) )
self.assertTrue(hasattr(snake_case , "image_std" ) )
self.assertTrue(hasattr(snake_case , "do_normalize" ) )
self.assertTrue(hasattr(snake_case , "do_resize" ) )
self.assertTrue(hasattr(snake_case , "size" ) )
self.assertTrue(hasattr(snake_case , "ignore_index" ) )
self.assertTrue(hasattr(snake_case , "class_info_file" ) )
self.assertTrue(hasattr(snake_case , "num_text" ) )
self.assertTrue(hasattr(snake_case , "repo_path" ) )
self.assertTrue(hasattr(snake_case , "metadata" ) )
self.assertTrue(hasattr(snake_case , "do_reduce_labels" ) )
def A_ ( self ):
'''simple docstring'''
pass
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase : Tuple = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , Image.Image )
# Test not batched input
UpperCAmelCase : Optional[int] = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.image_processing_tester.get_expected_values(snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase , UpperCAmelCase : List[Any] = self.image_processing_tester.get_expected_values(snake_case , batched=snake_case )
UpperCAmelCase : Optional[Any] = image_processor(
snake_case , ["semantic"] * len(snake_case ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase : Tuple = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case , numpify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , np.ndarray )
# Test not batched input
UpperCAmelCase : List[Any] = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
UpperCAmelCase , UpperCAmelCase : str = self.image_processing_tester.get_expected_values(snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase , UpperCAmelCase : str = self.image_processing_tester.get_expected_values(snake_case , batched=snake_case )
UpperCAmelCase : Optional[Any] = image_processor(
snake_case , ["semantic"] * len(snake_case ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case , torchify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , torch.Tensor )
# Test not batched input
UpperCAmelCase : Optional[int] = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
UpperCAmelCase , UpperCAmelCase : int = self.image_processing_tester.get_expected_values(snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.image_processing_tester.get_expected_values(snake_case , batched=snake_case )
UpperCAmelCase : Tuple = image_processor(
snake_case , ["semantic"] * len(snake_case ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self , snake_case=False , snake_case=False , snake_case="np" ):
'''simple docstring'''
UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
UpperCAmelCase : Dict = self.image_processing_tester.num_labels
UpperCAmelCase : str = None
UpperCAmelCase : str = None
UpperCAmelCase : Optional[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case )
if with_segmentation_maps:
UpperCAmelCase : int = num_labels
if is_instance_map:
UpperCAmelCase : Union[str, Any] = list(range(snake_case ) ) * 2
UpperCAmelCase : Optional[int] = dict(enumerate(snake_case ) )
UpperCAmelCase : Tuple = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uint8 ) for img in image_inputs
]
if segmentation_type == "pil":
UpperCAmelCase : List[str] = [Image.fromarray(snake_case ) for annotation in annotations]
UpperCAmelCase : Optional[Any] = image_processor(
snake_case , ["semantic"] * len(snake_case ) , snake_case , return_tensors="pt" , instance_id_to_semantic_id=snake_case , pad_and_return_pixel_mask=snake_case , )
return inputs
def A_ ( self ):
'''simple docstring'''
pass
def A_ ( self ):
'''simple docstring'''
def common(snake_case=False , snake_case=None ):
UpperCAmelCase : List[str] = self.comm_get_image_processor_inputs(
with_segmentation_maps=snake_case , is_instance_map=snake_case , segmentation_type=snake_case )
UpperCAmelCase : Optional[Any] = inputs["mask_labels"]
UpperCAmelCase : Optional[Any] = inputs["class_labels"]
UpperCAmelCase : str = inputs["pixel_values"]
UpperCAmelCase : Optional[Any] = inputs["text_inputs"]
# check the batch_size
for mask_label, class_label, text_input in zip(snake_case , snake_case , snake_case ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(snake_case ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=snake_case )
common(is_instance_map=snake_case , segmentation_type="pil" )
common(is_instance_map=snake_case , segmentation_type="pil" )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : int = np.zeros((2_0, 5_0) )
UpperCAmelCase : int = 1
UpperCAmelCase : List[Any] = 1
UpperCAmelCase : List[Any] = 1
UpperCAmelCase : str = binary_mask_to_rle(snake_case )
self.assertEqual(len(snake_case ) , 4 )
self.assertEqual(rle[0] , 2_1 )
self.assertEqual(rle[1] , 4_5 )
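# binary_mask_to_rle is expected to return flattened (start, run_length) pairs,
# so a mask with two foreground runs encodes to 4 numbers; the asserts above
# say the first run starts at pixel 21 of the flattened mask and spans 45 pixels.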
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=7_7 , task_seq_length=7_7 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
UpperCAmelCase : Optional[Any] = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCAmelCase : List[str] = feature_extractor.post_process_semantic_segmentation(snake_case )
self.assertEqual(len(snake_case ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
UpperCAmelCase : List[Any] = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
UpperCAmelCase : Optional[int] = feature_extractor.post_process_semantic_segmentation(snake_case , target_sizes=snake_case )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : int = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=7_7 , task_seq_length=7_7 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
UpperCAmelCase : Optional[Any] = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCAmelCase : List[Any] = image_processor.post_process_instance_segmentation(snake_case , threshold=0 )
self.assertTrue(len(snake_case ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("segmentation" in el )
self.assertTrue("segments_info" in el )
self.assertEqual(type(el["segments_info"] ) , snake_case )
self.assertEqual(
el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : List[str] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=7_7 , task_seq_length=7_7 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
UpperCAmelCase : int = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCAmelCase : Optional[Any] = image_processor.post_process_panoptic_segmentation(snake_case , threshold=0 )
self.assertTrue(len(snake_case ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("segmentation" in el )
self.assertTrue("segments_info" in el )
self.assertEqual(type(el["segments_info"] ) , snake_case )
self.assertEqual(
el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 679 |
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Tuple = logging.get_logger(__name__)
a : str = {
"snap-research/efficientformer-l1-300": (
"https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
),
}
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = "efficientformer"
def __init__( self , snake_case = [3, 2, 6, 4] , snake_case = [4_8, 9_6, 2_2_4, 4_4_8] , snake_case = [True, True, True, True] , snake_case = 4_4_8 , snake_case = 3_2 , snake_case = 4 , snake_case = 7 , snake_case = 5 , snake_case = 8 , snake_case = 4 , snake_case = 0.0 , snake_case = 1_6 , snake_case = 3 , snake_case = 3 , snake_case = 3 , snake_case = 2 , snake_case = 1 , snake_case = 0.0 , snake_case = 1 , snake_case = True , snake_case = True , snake_case = 1e-5 , snake_case = "gelu" , snake_case = 0.02 , snake_case = 1e-12 , snake_case = 2_2_4 , snake_case = 1e-05 , **snake_case , ):
'''simple docstring'''
super().__init__(**snake_case )
UpperCAmelCase : Any = hidden_act
UpperCAmelCase : Optional[Any] = hidden_dropout_prob
UpperCAmelCase : List[Any] = hidden_sizes
UpperCAmelCase : str = num_hidden_layers
UpperCAmelCase : int = num_attention_heads
UpperCAmelCase : List[Any] = initializer_range
UpperCAmelCase : str = layer_norm_eps
UpperCAmelCase : int = patch_size
UpperCAmelCase : Optional[int] = num_channels
UpperCAmelCase : Any = depths
UpperCAmelCase : Dict = mlp_expansion_ratio
UpperCAmelCase : List[str] = downsamples
UpperCAmelCase : List[Any] = dim
UpperCAmelCase : Any = key_dim
UpperCAmelCase : List[str] = attention_ratio
UpperCAmelCase : Union[str, Any] = resolution
UpperCAmelCase : List[str] = pool_size
UpperCAmelCase : Dict = downsample_patch_size
UpperCAmelCase : Optional[int] = downsample_stride
UpperCAmelCase : Any = downsample_pad
UpperCAmelCase : int = drop_path_rate
UpperCAmelCase : Optional[Any] = num_meta3d_blocks
UpperCAmelCase : List[str] = distillation
UpperCAmelCase : int = use_layer_scale
UpperCAmelCase : List[str] = layer_scale_init_value
UpperCAmelCase : Union[str, Any] = image_size
UpperCAmelCase : Any = batch_norm_eps
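# A minimal usage sketch (assuming the standard transformers API for this config):
# from transformers import EfficientFormerConfig, EfficientFormerModel
# config = EfficientFormerConfig(depths=[3, 2, 6, 4], hidden_sizes=[48, 96, 224, 448])
# model = EfficientFormerModel(config)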
| 679 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class snake_case_ (lowercase__ ):
"""simple docstring"""
_lowerCamelCase = 42
_lowerCamelCase = 42
class snake_case_ (lowercase__ , lowercase__ ):
"""simple docstring"""
_lowerCamelCase = 1
@register_to_config
def __init__( self ,lowercase = 2000 ,lowercase = 0.15 ,lowercase = 0.01 ,lowercase = 1348.0 ,lowercase = 1E-5 ,lowercase = 1 ,):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = sigma_max
# setable values
UpperCAmelCase_ : Optional[int] = None
self.set_sigmas(lowercase ,lowercase ,lowercase ,lowercase)
def A_ ( self ,lowercase ,lowercase = None):
"""simple docstring"""
return sample
def A_ ( self ,lowercase ,lowercase = None ,lowercase = None):
"""simple docstring"""
UpperCAmelCase_ : int = sampling_eps if sampling_eps is not None else self.config.sampling_eps
UpperCAmelCase_ : List[Any] = torch.linspace(1 ,lowercase ,lowercase ,device=lowercase)
def A_ ( self ,lowercase ,lowercase = None ,lowercase = None ,lowercase = None):
"""simple docstring"""
UpperCAmelCase_ : Any = sigma_min if sigma_min is not None else self.config.sigma_min
UpperCAmelCase_ : int = sigma_max if sigma_max is not None else self.config.sigma_max
UpperCAmelCase_ : Union[str, Any] = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(lowercase ,lowercase)
UpperCAmelCase_ : Union[str, Any] = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
UpperCAmelCase_ : Optional[int] = torch.exp(torch.linspace(math.log(lowercase) ,math.log(lowercase) ,lowercase))
UpperCAmelCase_ : Any = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
def A_ ( self ,lowercase ,lowercase):
"""simple docstring"""
return torch.where(
timesteps == 0 ,torch.zeros_like(t.to(timesteps.device)) ,self.discrete_sigmas[timesteps - 1].to(timesteps.device) ,)
def A_ ( self ,lowercase ,lowercase ,lowercase ,lowercase = None ,lowercase = True ,):
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
UpperCAmelCase_ : Optional[int] = timestep * torch.ones(
sample.shape[0] ,device=sample.device) # torch.repeat_interleave(timestep, sample.shape[0])
UpperCAmelCase_ : Tuple = (timestep * (len(self.timesteps) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
UpperCAmelCase_ : Optional[int] = timesteps.to(self.discrete_sigmas.device)
UpperCAmelCase_ : Optional[Any] = self.discrete_sigmas[timesteps].to(sample.device)
UpperCAmelCase_ : Optional[Any] = self.get_adjacent_sigma(lowercase ,lowercase).to(sample.device)
UpperCAmelCase_ : Any = torch.zeros_like(lowercase)
UpperCAmelCase_ : Dict = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
UpperCAmelCase_ : Dict = diffusion.flatten()
while len(diffusion.shape) < len(sample.shape):
UpperCAmelCase_ : List[str] = diffusion.unsqueeze(-1)
UpperCAmelCase_ : List[Any] = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
UpperCAmelCase_ : Union[str, Any] = randn_tensor(
sample.shape ,layout=sample.layout ,generator=lowercase ,device=sample.device ,dtype=sample.dtype)
UpperCAmelCase_ : Any = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
UpperCAmelCase_ : Tuple = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=lowercase ,prev_sample_mean=lowercase)
def A_ ( self ,lowercase ,lowercase ,lowercase = None ,lowercase = True ,):
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
UpperCAmelCase_ : int = randn_tensor(sample.shape ,layout=sample.layout ,generator=lowercase).to(sample.device)
# compute step size from the model_output, the noise, and the snr
UpperCAmelCase_ : Union[str, Any] = torch.norm(model_output.reshape(model_output.shape[0] ,-1) ,dim=-1).mean()
UpperCAmelCase_ : Optional[Any] = torch.norm(noise.reshape(noise.shape[0] ,-1) ,dim=-1).mean()
UpperCAmelCase_ : List[Any] = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
UpperCAmelCase_ : Optional[Any] = step_size * torch.ones(sample.shape[0]).to(sample.device)
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
UpperCAmelCase_ : Any = step_size.flatten()
while len(step_size.shape) < len(sample.shape):
UpperCAmelCase_ : Tuple = step_size.unsqueeze(-1)
UpperCAmelCase_ : Dict = sample + step_size * model_output
UpperCAmelCase_ : int = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowercase)
def A_ ( self ,lowercase ,lowercase ,lowercase ,):
"""simple docstring"""
UpperCAmelCase_ : Any = timesteps.to(original_samples.device)
UpperCAmelCase_ : List[str] = self.discrete_sigmas.to(original_samples.device)[timesteps]
UpperCAmelCase_ : Tuple = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(lowercase) * sigmas[:, None, None, None]
)
UpperCAmelCase_ : Tuple = noise + original_samples
return noisy_samples
def __len__( self):
"""simple docstring"""
return self.config.num_train_timesteps
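# A predictor-corrector sampling sketch (assumes the usual diffusers method
# names step_pred/step_correct for the obfuscated methods above, and a score
# model `unet`; illustrative only):
# sample = torch.randn(shape) * scheduler.config.sigma_max
# scheduler.set_timesteps(num_inference_steps)
# scheduler.set_sigmas(num_inference_steps)
# for t in scheduler.timesteps:
#     model_output = unet(sample, t).sample
#     sample = scheduler.step_correct(model_output, sample).prev_sample
#     sample = scheduler.step_pred(model_output, t, sample).prev_sample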
| 455 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class snake_case_ (lowercase__ ):
"""simple docstring"""
_lowerCamelCase = """ClapFeatureExtractor"""
_lowerCamelCase = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__( self ,lowercase ,lowercase):
"""simple docstring"""
super().__init__(lowercase ,lowercase)
def __call__( self ,lowercase=None ,lowercase=None ,lowercase=None ,**lowercase):
"""simple docstring"""
UpperCAmelCase_ : Dict = kwargs.pop("sampling_rate" ,lowercase)
if text is None and audios is None:
raise ValueError("You have to specify either text or audios. Both cannot be none.")
if text is not None:
UpperCAmelCase_ : List[str] = self.tokenizer(lowercase ,return_tensors=lowercase ,**lowercase)
if audios is not None:
UpperCAmelCase_ : str = self.feature_extractor(
lowercase ,sampling_rate=lowercase ,return_tensors=lowercase ,**lowercase)
if text is not None and audios is not None:
UpperCAmelCase_ : Optional[int] = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase) ,tensor_type=lowercase)
def A_ ( self ,*lowercase ,**lowercase):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowercase ,**lowercase)
def A_ ( self ,*lowercase ,**lowercase):
"""simple docstring"""
return self.tokenizer.decode(*lowercase ,**lowercase)
@property
def A_ ( self):
"""simple docstring"""
UpperCAmelCase_ : str = self.tokenizer.model_input_names
UpperCAmelCase_ : str = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
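# A minimal usage sketch (checkpoint name from the public CLAP release;
# `audio_array` is a placeholder waveform):
# from transformers import ClapProcessor
# processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
# inputs = processor(text=["a dog barking"], audios=audio_array,
#                    sampling_rate=48_000, return_tensors="pt")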
| 455 | 1 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ ):
UpperCAmelCase__: Dict = jnp.ones((batch_size, length) ) / length
return scores
def _UpperCAmelCase ( self ):
UpperCAmelCase__: Dict = None
UpperCAmelCase__: int = 2_0
UpperCAmelCase__: Optional[Any] = self._get_uniform_logits(batch_size=2 , length=lowerCamelCase__ )
# tweak scores to not be uniform anymore
UpperCAmelCase__: Any = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
UpperCAmelCase__: Optional[Any] = scores.at[1, 1_0].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
UpperCAmelCase__: Optional[Any] = jax.nn.softmax(lowerCamelCase__ , axis=-1 )
UpperCAmelCase__: Union[str, Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
UpperCAmelCase__: Any = FlaxTemperatureLogitsWarper(temperature=1.3 )
UpperCAmelCase__: Dict = jax.nn.softmax(temp_dist_warper_sharper(lowerCamelCase__ , scores.copy() , cur_len=lowerCamelCase__ ) , axis=-1 )
UpperCAmelCase__: Optional[int] = jax.nn.softmax(temp_dist_warper_smoother(lowerCamelCase__ , scores.copy() , cur_len=lowerCamelCase__ ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def _UpperCAmelCase ( self ):
UpperCAmelCase__: Optional[int] = None
UpperCAmelCase__: Union[str, Any] = 1_0
UpperCAmelCase__: Optional[int] = 2
# create ramp distribution
UpperCAmelCase__: Union[str, Any] = np.broadcast_to(np.arange(lowerCamelCase__ )[None, :] , (batch_size, vocab_size) ).copy()
UpperCAmelCase__: int = ramp_logits[1:, : vocab_size // 2] + vocab_size
UpperCAmelCase__: str = FlaxTopKLogitsWarper(3 )
UpperCAmelCase__: Union[str, Any] = top_k_warp(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
UpperCAmelCase__: Union[str, Any] = 5
UpperCAmelCase__: str = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
UpperCAmelCase__: Optional[Any] = np.broadcast_to(np.arange(lowerCamelCase__ )[None, :] , (batch_size, length) ).copy()
UpperCAmelCase__: Tuple = top_k_warp_safety_check(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def _UpperCAmelCase ( self ):
UpperCAmelCase__: Any = None
UpperCAmelCase__: List[str] = 1_0
UpperCAmelCase__: str = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
UpperCAmelCase__: Dict = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
UpperCAmelCase__: Tuple = FlaxTopPLogitsWarper(0.8 )
UpperCAmelCase__: Tuple = np.exp(top_p_warp(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
UpperCAmelCase__: Optional[int] = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
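# Why these values survive: for batch 0 the sorted probabilities accumulate to
# 0.5 + 0.3 = 0.8 >= top_p after two tokens; for batch 1 it takes three tokens
# (0.3 + 0.3 + 0.25 = 0.85 >= 0.8), matching the expected tensor above.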
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# check edge cases with negative and extreme logits
UpperCAmelCase__: Union[str, Any] = np.broadcast_to(np.arange(lowerCamelCase__ )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
UpperCAmelCase__: Dict = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
UpperCAmelCase__: Dict = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
UpperCAmelCase__: Optional[int] = top_p_warp(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def _UpperCAmelCase ( self ):
UpperCAmelCase__: Optional[int] = 2_0
UpperCAmelCase__: List[str] = 4
UpperCAmelCase__: Tuple = 0
UpperCAmelCase__: Union[str, Any] = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=lowerCamelCase__ )
# check that min length is applied at length 5
UpperCAmelCase__: Optional[Any] = ids_tensor((batch_size, 2_0) , vocab_size=2_0 )
UpperCAmelCase__: Union[str, Any] = 5
UpperCAmelCase__: Any = self._get_uniform_logits(lowerCamelCase__ , lowerCamelCase__ )
UpperCAmelCase__: Optional[Any] = min_dist_processor(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("inf" )] )
# check that min length is not applied anymore at length 15
UpperCAmelCase__: str = self._get_uniform_logits(lowerCamelCase__ , lowerCamelCase__ )
UpperCAmelCase__: Union[str, Any] = 1_5
UpperCAmelCase__: Any = min_dist_processor(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
self.assertFalse(jnp.isinf(lowerCamelCase__ ).any() )
def _UpperCAmelCase ( self ):
UpperCAmelCase__: Union[str, Any] = 2_0
UpperCAmelCase__: List[Any] = 4
UpperCAmelCase__: Optional[int] = 0
UpperCAmelCase__: Tuple = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCamelCase__ )
# check that all scores are -inf except the bos_token_id score
UpperCAmelCase__: Union[str, Any] = ids_tensor((batch_size, 1) , vocab_size=2_0 )
UpperCAmelCase__: Any = 1
UpperCAmelCase__: Any = self._get_uniform_logits(lowerCamelCase__ , lowerCamelCase__ )
UpperCAmelCase__: str = logits_processor(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
UpperCAmelCase__: Tuple = 3
UpperCAmelCase__: Optional[int] = self._get_uniform_logits(lowerCamelCase__ , lowerCamelCase__ )
UpperCAmelCase__: int = logits_processor(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
self.assertFalse(jnp.isinf(lowerCamelCase__ ).any() )
def _UpperCAmelCase ( self ):
UpperCAmelCase__: Union[str, Any] = 2_0
UpperCAmelCase__: Any = 4
UpperCAmelCase__: str = 0
UpperCAmelCase__: Optional[int] = 5
UpperCAmelCase__: Any = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCamelCase__ , eos_token_id=lowerCamelCase__ )
# check that all scores are -inf except the eos_token_id when max_length is reached
UpperCAmelCase__: Optional[Any] = ids_tensor((batch_size, 4) , vocab_size=2_0 )
UpperCAmelCase__: Any = 4
UpperCAmelCase__: List[Any] = self._get_uniform_logits(lowerCamelCase__ , lowerCamelCase__ )
UpperCAmelCase__: int = logits_processor(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
UpperCAmelCase__: Dict = 3
UpperCAmelCase__: str = self._get_uniform_logits(lowerCamelCase__ , lowerCamelCase__ )
UpperCAmelCase__: Union[str, Any] = logits_processor(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
self.assertFalse(jnp.isinf(lowerCamelCase__ ).any() )
def _UpperCAmelCase ( self ):
UpperCAmelCase__: int = 4
UpperCAmelCase__: Tuple = 1_0
UpperCAmelCase__: List[Any] = 1_5
UpperCAmelCase__: str = 2
UpperCAmelCase__: Optional[Any] = 1
UpperCAmelCase__: List[Any] = 1_5
# dummy input_ids and scores
UpperCAmelCase__: Any = ids_tensor((batch_size, sequence_length) , lowerCamelCase__ )
UpperCAmelCase__: List[str] = input_ids.copy()
UpperCAmelCase__: Optional[int] = self._get_uniform_logits(lowerCamelCase__ , lowerCamelCase__ )
UpperCAmelCase__: List[str] = scores.copy()
# instantiate all dist processors
UpperCAmelCase__: Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
UpperCAmelCase__: Union[str, Any] = FlaxTopKLogitsWarper(3 )
UpperCAmelCase__: str = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
UpperCAmelCase__: int = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=lowerCamelCase__ )
UpperCAmelCase__: List[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCamelCase__ )
UpperCAmelCase__: Optional[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCamelCase__ , eos_token_id=lowerCamelCase__ )
UpperCAmelCase__: Any = 1_0
# no processor list
UpperCAmelCase__: Union[str, Any] = temp_dist_warp(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
UpperCAmelCase__: List[str] = top_k_warp(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
UpperCAmelCase__: List[Any] = top_p_warp(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
UpperCAmelCase__: int = min_dist_proc(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
UpperCAmelCase__: Optional[Any] = bos_dist_proc(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
UpperCAmelCase__: Any = eos_dist_proc(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
# with processor list
UpperCAmelCase__: List[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
UpperCAmelCase__: Any = processor(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
# scores should be equal
self.assertTrue(jnp.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def _UpperCAmelCase ( self ):
UpperCAmelCase__: int = 4
UpperCAmelCase__: List[Any] = 1_0
UpperCAmelCase__: List[str] = 1_5
UpperCAmelCase__: List[Any] = 2
UpperCAmelCase__: Optional[int] = 1
UpperCAmelCase__: Tuple = 1_5
# dummy input_ids and scores
UpperCAmelCase__: List[str] = ids_tensor((batch_size, sequence_length) , lowerCamelCase__ )
UpperCAmelCase__: Union[str, Any] = input_ids.copy()
UpperCAmelCase__: Any = self._get_uniform_logits(lowerCamelCase__ , lowerCamelCase__ )
UpperCAmelCase__: Tuple = scores.copy()
# instantiate all dist processors
UpperCAmelCase__: Any = FlaxTemperatureLogitsWarper(temperature=0.5 )
UpperCAmelCase__: List[str] = FlaxTopKLogitsWarper(3 )
UpperCAmelCase__: Dict = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
UpperCAmelCase__: Union[str, Any] = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=lowerCamelCase__ )
UpperCAmelCase__: List[str] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCamelCase__ )
UpperCAmelCase__: Any = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCamelCase__ , eos_token_id=lowerCamelCase__ )
UpperCAmelCase__: Optional[int] = 1_0
# no processor list
def run_no_processor_list(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
UpperCAmelCase__: str = temp_dist_warp(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
UpperCAmelCase__: Tuple = top_k_warp(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
UpperCAmelCase__: Union[str, Any] = top_p_warp(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
UpperCAmelCase__: Dict = min_dist_proc(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
UpperCAmelCase__: Dict = bos_dist_proc(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
UpperCAmelCase__: Optional[Any] = eos_dist_proc(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
return scores
# with processor list
def run_processor_list(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
UpperCAmelCase__: Optional[int] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
UpperCAmelCase__: List[Any] = processor(lowerCamelCase__ , lowerCamelCase__ , cur_len=lowerCamelCase__ )
return scores
UpperCAmelCase__: Union[str, Any] = jax.jit(lowerCamelCase__ )
UpperCAmelCase__: int = jax.jit(lowerCamelCase__ )
UpperCAmelCase__: List[Any] = jitted_run_no_processor_list(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
UpperCAmelCase__: List[str] = jitted_run_processor_list(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# scores should be equal
self.assertTrue(jnp.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 113 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase : Union[str, Any] ={
"""configuration_clap""": [
"""CLAP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ClapAudioConfig""",
"""ClapConfig""",
"""ClapTextConfig""",
],
"""processing_clap""": ["""ClapProcessor"""],
}
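# `_import_structure` maps each submodule to its public names; the _LazyModule
# constructed at the bottom defers the real imports until an attribute is
# first accessed.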
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Union[str, Any] =[
"""CLAP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ClapModel""",
"""ClapPreTrainedModel""",
"""ClapTextModel""",
"""ClapTextModelWithProjection""",
"""ClapAudioModel""",
"""ClapAudioModelWithProjection""",
]
_lowerCAmelCase : List[str] =["""ClapFeatureExtractor"""]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
_lowerCAmelCase : Optional[int] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 113 | 1 |
from __future__ import annotations
from typing import Any
class UpperCAmelCase__ :
'''simple docstring'''
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 0 ):
a =row, column
a =[[default_value for c in range(_lowerCAmelCase )] for r in range(_lowerCAmelCase )]
def __str__( self ):
a =F'''Matrix consist of {self.row} rows and {self.column} columns\n'''
# Make string identifier
a =0
for row_vector in self.array:
for obj in row_vector:
a =max(_lowerCAmelCase , len(str(_lowerCAmelCase ) ) )
a =F'''%{max_element_length}s'''
# Make string and return
def single_line(_lowerCAmelCase ) -> str:
nonlocal string_format_identifier
a ='['
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(_lowerCAmelCase ) for row_vector in self.array )
return s
def __repr__( self ):
return str(self )
def lowerCAmelCase__ ( self , _lowerCAmelCase ):
if not (isinstance(_lowerCAmelCase , (list, tuple) ) and len(_lowerCAmelCase ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self , _lowerCAmelCase ):
assert self.validate_indices(_lowerCAmelCase )
return self.array[loc[0]][loc[1]]
def __setitem__( self , _lowerCAmelCase , _lowerCAmelCase ):
assert self.validate_indices(_lowerCAmelCase )
a =value
def __add__( self , _lowerCAmelCase ):
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert self.row == another.row and self.column == another.column
# Add
a =Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
a =self[r, c] + another[r, c]
return result
def __neg__( self ):
a =Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
a =-self[r, c]
return result
def __sub__( self , _lowerCAmelCase ):
return self + (-another)
def __mul__( self , _lowerCAmelCase ):
if isinstance(_lowerCAmelCase , (int, float) ): # Scalar multiplication
a =Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
a =self[r, c] * another
return result
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ): # Matrix multiplication
assert self.column == another.row
a =Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
a =F'''Unsupported type given for another ({type(_lowerCAmelCase )})'''
raise TypeError(_lowerCAmelCase )
def lowerCAmelCase__ ( self ):
a =Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
a =self[r, c]
return result
def lowerCAmelCase__ ( self , _lowerCAmelCase , _lowerCAmelCase ):
assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
a =v.transpose()
a =(v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
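# Sherman-Morrison: (A + u v^T)^(-1) = A^(-1) - (A^(-1) u)(v^T A^(-1)) / (1 + v^T A^(-1) u),
# with `self` playing the role of A^(-1) in the expression returned above.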
# Testing
if __name__ == "__main__":
def lowerCamelCase ( )-> None:
"""simple docstring"""
a =Matrix(3 , 3 , 0 )
for i in range(3 ):
a =1
print(F'''a^(-1) is {ainv}''' )
# u, v
a =Matrix(3 , 1 , 0 )
a =1, 2, -3
a =Matrix(3 , 1 , 0 )
a =4, -2, 5
print(F'''u is {u}''' )
print(F'''v is {v}''' )
print(F'''uv^T is {u * v.transpose()}''' )
# Sherman Morrison
print(F'''(a + uv^T)^(-1) is {ainv.sherman_morrison(u , v )}''' )
def lowerCamelCase ( )-> None:
"""simple docstring"""
import doctest
doctest.testmod()
testa()
| 703 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase = {
'''configuration_trajectory_transformer''': [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TrajectoryTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrajectoryTransformerModel''',
'''TrajectoryTransformerPreTrainedModel''',
'''load_tf_weights_in_trajectory_transformer''',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 321 | 0 |
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class UpperCAmelCase__ ( unittest.TestCase):
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = 0
__UpperCamelCase = [0]
__UpperCamelCase = [0]
__UpperCamelCase = len(_SCREAMING_SNAKE_CASE )
self.assertEqual(k.knapsack(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , 0 )
__UpperCamelCase = [6_0]
__UpperCamelCase = [1_0]
__UpperCamelCase = len(_SCREAMING_SNAKE_CASE )
self.assertEqual(k.knapsack(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , 0 )
def __lowerCamelCase ( self ) -> int:
__UpperCamelCase = 3
__UpperCamelCase = [1, 2, 3]
__UpperCamelCase = [3, 2, 1]
__UpperCamelCase = len(_SCREAMING_SNAKE_CASE )
self.assertEqual(k.knapsack(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , 5 )
def __lowerCamelCase ( self ) -> List[Any]:
__UpperCamelCase = 5_0
__UpperCamelCase = [6_0, 1_0_0, 1_2_0]
__UpperCamelCase = [1_0, 2_0, 3_0]
__UpperCamelCase = len(_SCREAMING_SNAKE_CASE )
self.assertEqual(k.knapsack(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , 2_2_0 )
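# Optimum check: taking the 100- and 120-value items uses weight 20 + 30 = 50,
# exactly the capacity, for a best value of 220.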
if __name__ == "__main__":
unittest.main()
| 601 |
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
a = yaml.safe_load(
"\\nname: \"\"\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: \"Dataset Card for X\" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: \"Table of Contents\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Dataset Description\"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: \"Dataset Summary\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Supported Tasks and Leaderboards\"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n"
)
a = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
a = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
a = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
a = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Extra Ignored Subsection",
"text": "",
"is_empty_text": True,
"subsections": [],
}
],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
a = "\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
a = (
"The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)
a = "\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
a = (
"The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)
a = "\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
a = "The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."
a = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
a = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."
a = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n"
a = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."
a = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n"
a = "The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."
a = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n"
a = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."
a = "\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
a = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."
a = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n"
a = "The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."
a = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
a = "The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."
a = ""
a = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."
a = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
a = "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."
@pytest.mark.parametrize(
"""readme_md, expected_dict""" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case ) -> Dict:
assert ReadMe.from_string(snake_case , snake_case ).to_dict() == expected_dict
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case ) -> Union[str, Any]:
with pytest.raises(snake_case , match=re.escape(expected_error.format(path="""root""" ) ) ):
_UpperCAmelCase = ReadMe.from_string(snake_case , snake_case )
readme.validate()
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case ) -> Tuple:
with pytest.raises(snake_case , match=re.escape(expected_error.format(path="""root""" ) ) ):
ReadMe.from_string(snake_case , snake_case )
@pytest.mark.parametrize(
"""readme_md,""" , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def _SCREAMING_SNAKE_CASE ( snake_case ) -> Union[str, Any]:
ReadMe.from_string(snake_case , snake_case , suppress_parsing_errors=snake_case )
@pytest.mark.parametrize(
"""readme_md, expected_dict""" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_readme_error(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
"""readme_md,""" , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
| 518 | 0 |
"""Counting block combinations (cf. Project Euler 115)."""
from itertools import count


def solution(min_block_length: int = 50) -> int:
    """Return the smallest n for which the fill-count function first exceeds one million."""
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n
if __name__ == "__main__":
print(f'''{solution() = }''')
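# With the default min_block_length of 50, the count first exceeds one million at
# n = 168, the Project Euler 115 answer.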
| 708 |
def gray_code(bit_count: int) -> list:
    """Return the Gray code sequence for ``bit_count`` bits as integers."""
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """Return the Gray code sequence for ``bit_count`` bits as binary strings."""
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n = 1
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])

    return sequence
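# Example: gray_code(2) -> [0, 1, 3, 2] (from the strings "00", "01", "11", "10");
# consecutive codes differ in exactly one bit.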
if __name__ == "__main__":
import doctest
doctest.testmod()
| 483 | 0 |
'''simple docstring'''
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Convert an RGB image to grayscale with the usual luma weights."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Threshold a grayscale image into a boolean mask."""
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Dilate a binary image with the given structuring element."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )

    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image

    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))

    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)

    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
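# Note: the cross-shaped structuring element above grows every white region by one
# pixel in the 4-connected directions; a np.ones((3, 3)) kernel would also dilate
# diagonally.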
| 688 |
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set
    # reset_position_index_per_cell of TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    # (config attribute names follow the TAPAS fine-tuning hparams of run_task_main.py / hparam_utils.py)
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True

        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False

        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
help='Whether to use relative position embeddings or not. Defaults to True.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
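    # Example invocation (script name and paths are placeholders):
    #   python convert_tapas_original_tf_checkpoint_to_pytorch.py --task WTQ \
    #       --tf_checkpoint_path ./model.ckpt --tapas_config_file ./tapas_config.json \
    #       --pytorch_dump_path ./tapas-wtq-converted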
| 688 | 1 |
# limitations under the License.

from typing import Optional, Tuple, Union

import torch

from diffusers import DiffusionPipeline, ImagePipelineOutput


class CustomLocalPipeline(DiffusionPipeline):
    """Minimal unconditional image-generation pipeline; the trailing marker string identifies it
    as the local custom-pipeline test fixture (class name taken from that fixture)."""

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,), "This is a local test"

        return ImagePipelineOutput(images=image), "This is a local test"
| 438 |
def odd_even_transposition(arr: list) -> list:
    """Brick sort: alternating odd/even phases compare disjoint pairs, which makes
    this bubble-sort variant easy to parallelise."""
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
| 438 | 1 |
class SubArray:
    def __init__(self, arr: str) -> None:
        # we need a list not a string, so do something to change the type
        self.array = arr.split(",")

    def solve_sub_array(self) -> int:
        """Kadane-style DP: maximum contiguous sub-array sum."""
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(
                int(self.array[i]) + sum_value[i - 1], int(self.array[i])
            )
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]
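# Example (comma-separated string input, as expected by __init__):
#   SubArray("1,-2,3,4").solve_sub_array() -> 7   (best slice is "3,4")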
if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
| 276 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")

    return name
def convert_state_dict(orig_state_dict, config):
    # Destination key names below are reconstructed from the renamed module paths in `rename_key`.
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of the vision encoder's
            # attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            prefix = f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of the text encoder's
            # attention layers require the same splitting
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            prefix = f"text_model.encoder.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3_523, 6.3_629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1_873, 8.6_230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''')
parser.add_argument(
'''--model_name''',
        default='''groupvit-gcc-yfcc''',
type=str,
help='''Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''',
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
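    # Example invocation (script name and paths are placeholders):
    #   python convert_groupvit_nvlab_to_hf.py --checkpoint_path ./groupvit_checkpoint.pth \
    #       --pytorch_dump_folder_path ./groupvit-gcc-yfcc --model_name groupvit-gcc-yfcc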
| 276 | 1 |
import torch
from torch import nn
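# A minimal usage sketch for the adaptive softmax defined below (sizes are illustrative
# assumptions, not prescribed values):
#   crit = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=64, d_proj=64, cutoffs=[200, 500])
#   hidden = torch.randn(2, 6, 64)            # (batch, seq_len, d_proj)
#   labels = torch.randint(0, 1000, (2, 6))   # next-token targets
#   nll = crit(hidden, labels)                # per-position negative log-likelihoods,
#                                             # flattened to (batch * (seq_len - 1),)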
class ProjectedAdaptiveLogSoftmax(nn.Module):
    """Adaptive softmax with optional embedding projections, in the Transformer-XL style."""

    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
            self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out

    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    logprob_i = head_logprob[:, -i, None] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
| 715 |
import math
def check_partition_perfect(positive_integer: int) -> bool:
    """A partition count is "perfect" when this exponent is a whole number."""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0

    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
    print(f"{solution() = }")
| 436 | 0 |
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of an edge label to the child node
        self.nodes = {}

        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple:
        """Compute the common substring of the node's prefix and a word."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        # Case 1: The word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True

        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)

        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word
            )

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)

            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word
            )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word
            )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes

                    return True

    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print("-" * height, self.prefix, "  (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
| 210 |
def solution(length: int = 50) -> int:
    """Count the ways to tile a row of the given length with tiles of length 2, 3 and 4."""
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 210 | 1 |
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class GradientAccumulatorTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)

    def testGradientAccumulatorDistributionStrategy(self):
        context._context = None
        ops.enable_eager_execution_internal()

        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])

        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)

        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
| 718 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )

    def test_padding_different_model_input_name(self):
        # the tokenizer has no padding token, so the common test does not apply
        pass


@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """Run the same tests with ftfy + spacy pre-tokenization enabled."""

    pass
| 59 | 0 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the dataset into features and target labels
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # Load the Iris dataset and split it into train/test sets
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 401 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 401 | 1 |
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
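# Minimal usage sketch for the extractor defined below (the class name `MCTCTFeatureExtractor`
# is assumed from the deprecated mctct module these relative imports point at; 16 kHz mono input):
#   fe = MCTCTFeatureExtractor(feature_size=80, sampling_rate=16000)
#   feats = fe(np.zeros(16000, dtype=np.float32), sampling_rate=16000, return_tensors="np")
#   feats["input_features"].shape  # -> (1, num_frames, 80)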
logger = logging.get_logger(__name__)


class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    """Feature extractor that turns raw speech into log-mel spectral (MFSC) features."""

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000

        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform: np.array) -> np.ndarray:
        """Extract MFSC features for one (unbatched) waveform vector."""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
| 59 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_xmod': [
'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XmodConfig',
'XmodOnnxConfig',
],
}
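# Only the configuration is registered eagerly; the heavy modeling module is
# added to _import_structure below and resolved lazily by _LazyModule once
# torch is confirmed to be available.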
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xmod'] = [
'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST',
'XmodForCausalLM',
'XmodForMaskedLM',
'XmodForMultipleChoice',
'XmodForQuestionAnswering',
'XmodForSequenceClassification',
'XmodForTokenClassification',
'XmodModel',
'XmodPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 59 | 1 |
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    """Pick items in descending key_func order while they still fit in max_cost."""
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass
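# Example usage (illustrative values, not from the original source): greedily
# pick items by value-to-weight ratio under a weight budget of 15.
# menu = build_menu(['burger', 'pizza', 'salad'], [80, 100, 30], [40, 60, 10])
# chosen, total_value = greedy(menu, 15, Things.value_weight)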
if __name__ == "__main__":
import doctest
doctest.testmod()
| 100 |
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    # NOTE: scraping by CSS class is brittle; Yahoo can change this at any time.
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}") | 249 | 0 |
from math import isclose, sqrt
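# Project Euler problem 144: a laser enters the white cell at (0.0, 10.1),
# first strikes the ellipse 4x^2 + y^2 = 100 at (1.4, -9.6), and we count how
# many times it reflects off the internal surface before escaping through the
# small gap at the top (-0.01 <= x <= 0.01).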
def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    # gradient of the normal at the point of incidence, and the sin/cos of the
    # doubled angle used to reflect the incoming gradient about that normal
    normal_gradient = point_y / 4 / point_x
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point; keep the other one
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1
    return num_reflections
if __name__ == "__main__":
print(F"""{solution() = }""")
| 636 | class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    """Three FIFO queues, one per priority level 0-2, each capped at 100 items."""

    def __init__(self):
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError('Maximum queue size is 100')
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError('Valid priorities are 0, 1, and 2')

    def dequeue(self) -> int:
        """Return the oldest element from the highest-priority non-empty queue."""
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError('All queues are empty')

    def __str__(self) -> str:
        return '\n'.join(f'Priority {i}: {q}' for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """A queue that always dequeues its smallest element."""

    def __init__(self):
        self.queue = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError('Maximum queue size is 100')
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError('The queue is empty')
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self) -> str:
        return str(self.queue)
def fixed_priority_queue():
    fpq = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 1_00 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 1_28 )
    print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue():
    epq = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(1_00 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(1_28 )
    print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
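    # The fixed-priority queue drains priority 0 first (10, 100, 128 above),
    # then falls through to lower priorities; the element queue always hands
    # back its current minimum regardless of insertion order.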
| 636 | 1 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument('-f')
    args = parser.parse_args()
    return args.f
def get_results(output_dir):
    # read the all_results.json the example scripts write at the end of training
    results = {}
    path = os.path.join(output_dir, 'all_results.json')
    if os.path.exists(path):
        with open(path, 'r') as f:
            results = json.load(f)
    else:
        raise ValueError(f'can\'t find {path}')
    return results
def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == 'cuda'
    return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, 'default_config.yml')
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ['accelerate', 'launch', '--config_file', cls.configPath]
@classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
'''.split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result['eval_accuracy'], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'epoch_0')))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'glue_no_trainer')))
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
'''.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertLess(result['perplexity'], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'epoch_0')))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'clm_no_trainer')))
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertLess(result['perplexity'], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'epoch_0')))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'mlm_no_trainer')))
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_ner_no_trainer(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result['eval_accuracy'], 0.75)
        self.assertLess(result['train_loss'], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'epoch_0')))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'ner_no_trainer')))
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result['eval_f1'], 28)
        self.assertGreaterEqual(result['eval_exact'], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'epoch_0')))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'qa_no_trainer')))
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result['eval_accuracy'], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'swag_no_trainer')))
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result['eval_rouge1'], 10)
        self.assertGreaterEqual(result['eval_rouge2'], 2)
        self.assertGreaterEqual(result['eval_rougeL'], 7)
        self.assertGreaterEqual(result['eval_rougeLsum'], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'epoch_0')))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'summarization_no_trainer')))
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result['eval_bleu'], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'epoch_0')))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'translation_no_trainer')))
@slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
'''.split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result['eval_overall_accuracy'], 0.10)
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
'''.split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result['eval_accuracy'], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'step_1')))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, 'image_classification_no_trainer'))) | 32 |
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )
    # flag names below follow the usual ModelTesterMixin conventions; the
    # obfuscated source only preserved the four False values
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
    def test_inputs_embeds(self):
        pass
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1), msg=f'Parameter {name} of model {model_class} seems not properly initialized', )
                    self.assertTrue(
                        torch.all(module.bias == 0), msg=f'Parameter {name} of model {model_class} seems not properly initialized', )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 2, self.model_tester.image_size // 2], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )
@slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 307 | 0 |
'''simple docstring'''
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    '''Compute and log WER/CER for the decoded predictions.'''
    log_outputs = args.log_outputs
    dataset_id = '_'.join(args.dataset.split('/') + [args.config, args.split])
    # load metric
    wer = load_metric('wer')
    cer = load_metric('cer')
    # compute metrics
    wer_result = wer.compute(references=result['target'], predictions=result['prediction'])
    cer_result = cer.compute(references=result['target'], predictions=result['prediction'])
    # print & log results
    result_str = f"""WER: {wer_result}\nCER: {cer_result}"""
    print(result_str)
    with open(f"""{dataset_id}_eval_results.txt""", 'w') as f:
        f.write(result_str)
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"""log_{dataset_id}_predictions.txt"""
        target_file = f"""log_{dataset_id}_targets.txt"""
        with open(pred_file, 'w') as p, open(target_file, 'w') as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"""{i}""" + '\n')
                p.write(batch['prediction'] + '\n')
                t.write(f"""{i}""" + '\n')
                t.write(batch['target'] + '\n')

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    '''Lower-case the text and strip the characters/whitespace ignored during training.'''
    chars_to_ignore_regex = '[,?.!\-\;\:\"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, '', text.lower())
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['\n\n', '\n', '   ', '  ']
    for t in token_sequences_to_ignore:
        text = ' '.join(text.split(t))
    return text
def main(args):
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column('audio', Audio(sampling_rate=sampling_rate))
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('automatic-speech-recognition', model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch['audio']['array'], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)
        batch['prediction'] = prediction['text']
        batch['target'] = normalize_text(batch['sentence'])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)
    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
parser.add_argument(
'--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
)
parser.add_argument(
'--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
)
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
)
    args = parser.parse_args()
main(args)
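    # Example invocation (hypothetical IDs; adjust to your model/dataset):
    #   python eval.py --model_id facebook/wav2vec2-base-960h \
    #       --dataset mozilla-foundation/common_voice_7_0 --config en --split test --log_outputs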
| 708 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # quiet TensorFlow's C++ logging if TF ends up imported
print('Python version:', sys.version)
print('OS platform:', platform.platform())
print('OS architecture:', platform.machine())
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
except ImportError:
print('Torch version:', None)
try:
import transformers
print('transformers version:', transformers.__version__)
except ImportError:
print('transformers version:', None)
| 46 | 0 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
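# FRAMEWORK selects the tensor type requested via `return_tensors` in the
# batching tests below, based on which backend is installed.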
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)
    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")
    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # collect the ids whose single-byte decodings round-trip cleanly
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))
        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])
    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")
        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")
    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK)
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)
        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)
        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                shutil.rmtree(tmpdirname)
        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)
                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)
                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]
                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir, )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"], tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])), )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=False)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, )
                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"], tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])), )
    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                tokenizer = tokenizer_class.from_pretrained(tmp_dir)
                self.assertTrue(tokenizer.decode([255]) == "")
    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]
                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False)
                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)
                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)
                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])
                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters]) | 6 |
'''simple docstring'''
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=' ')
            temp = temp.next
        print()

    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        else:
            node_1 = self.head
            while node_1 is not None and node_1.data != node_data_1:
                node_1 = node_1.next
            node_2 = self.head
            while node_2 is not None and node_2.data != node_data_2:
                node_2 = node_2.next
            if node_1 is None or node_2 is None:
                return
            # Exchange just the payloads; the node links stay untouched,
            # which keeps the swap O(1) once both nodes are found.
            node_1.data, node_2.data = node_2.data, node_1.data
if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
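# Expected output:
#   1 2 3 4 5
#   After swapping
#   4 2 3 1 5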
| 56 | 0 |
from collections import defaultdict
from math import ceil, sqrt
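# Project Euler problem 174: a hollow square lamina with outer side
# `outer_width` and a square hole of side `hole_width` uses
# outer_width**2 - hole_width**2 tiles; count how many tile totals
# t <= one million can be formed in between 1 and 10 distinct ways.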
def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        # outer and hole widths must share parity for the border to tile evenly
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 164 | # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = 'facebook/bart-large-mnli'
    description = (
        'This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '
        'should be the text to classify, and `labels`, which should be the list of labels to use for classification. '
        'It returns the most likely label in the list of provided `labels` for the input text.'
    )
    name = 'text_classifier'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification
    inputs = ['text', ['text']]
    outputs = ['text']
    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")
    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [F"This example is {label}" for label in labels], return_tensors="pt", padding="max_length", )
    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
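# Example usage (illustrative; the tool is invoked via the PipelineTool
# __call__, which chains encode -> forward -> decode):
# tool = TextClassificationTool()
# tool("This movie was great fun", labels=["positive", "negative"])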
| 164 | 1 |
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[float | complex, float | complex]:
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")
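# e.g. quadratic_roots(a=1, b=-2, c=1) -> (1.0, 1.0); a negative discriminant
# yields a complex-conjugate pair instead of two reals.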
if __name__ == "__main__":
main() | 25 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def __UpperCamelCase ( self : List[Any] , a : int , a : Optional[Any] , a : Optional[Any] , a : str , a : str , a : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForQuestionAnswering(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(
a , attention_mask=a , start_positions=a , end_positions=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : Optional[int] , a : str , a : Any , a : int , a : Optional[Any] , a : int , a : str ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = DistilBertForSequenceClassification(a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : Optional[Any] , a : List[Any] , a : Optional[int] , a : Union[str, Any] , a : Dict , a : Any , a : Optional[Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.num_labels
SCREAMING_SNAKE_CASE : List[str] = DistilBertForTokenClassification(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : int , a : Any , a : Optional[int] , a : Union[str, Any] , a : Tuple , a : Optional[int] , a : Tuple ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.num_choices
SCREAMING_SNAKE_CASE : Any = DistilBertForMultipleChoice(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Optional[Any] = model(
a , attention_mask=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE)) : Tuple = config_and_inputs
SCREAMING_SNAKE_CASE : int = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( __A , __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase__ =(
{
'feature-extraction': DistilBertModel,
'fill-mask': DistilBertForMaskedLM,
'question-answering': DistilBertForQuestionAnswering,
'text-classification': DistilBertForSequenceClassification,
'token-classification': DistilBertForTokenClassification,
'zero-shot': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ =True
lowerCamelCase__ =True
lowerCamelCase__ =True
lowerCamelCase__ =True
def __UpperCamelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = DistilBertModelTester(self )
SCREAMING_SNAKE_CASE : List[str] = ConfigTester(self , config_class=a , dim=37 )
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a )
def __UpperCamelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a )
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a )
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a )
def __UpperCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a )
def __UpperCamelCase ( self : List[Any] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a )
@slow
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertModel.from_pretrained(a )
self.assertIsNotNone(a )
@slow
@require_torch_gpu
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : Any = model_class(config=a )
SCREAMING_SNAKE_CASE : List[Any] = self._prepare_for_class(a , a )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.jit.trace(
a , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , "traced_model.pt" ) )
SCREAMING_SNAKE_CASE : Tuple = torch.jit.load(os.path.join(a , "traced_model.pt" ) , map_location=a )
loaded(inputs_dict["input_ids"].to(a ) , inputs_dict["attention_mask"].to(a ) )
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCamelCase ( self : int ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = DistilBertModel.from_pretrained("distilbert-base-uncased" )
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model(a , attention_mask=a )[0]
SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , a )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(
[[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) ) | 25 | 1 |
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def get_week_day(year: int, month: int, day: int) -> str:
    """Returns the week-day name out of a given date.

    >>> get_week_day(2020, 10, 24)
    'Saturday'
    """
    # minimal input check:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
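    # Quick sanity check against a known date (2000-01-01 fell on a Saturday):
    print(get_week_day(2000, 1, 1))  # Saturday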
| 154 |
from __future__ import annotations
END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True  # mark the end of a complete word

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
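    # For the word tuple above, and given Python's insertion-ordered dicts (3.7+),
    # this should print (the trailing spaces come from the END marker):
    # ('depart ', 'detergent ', 'deer ', 'deal ')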
| 154 | 1 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    """
    Base class from which `.generate()` streamers should inherit.
    """

    def put(self, value):
        """Function that is called by `.generate()` to push new tokens"""
        raise NotImplementedError()

    def end(self):
        """Function that is called by `.generate()` to signal the end of generation"""
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    """
    Simple text streamer that prints decoded tokens to stdout as soon as entire words are formed.
    """

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        """Receives tokens, decodes them, and prints them to stdout as soon as they form entire words."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        """Flushes any remaining cache and prints a newline to stdout."""
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Prints the new text to stdout. If the stream is ending, also prints a newline."""
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)
            or (cp >= 0x20000 and cp <= 0x2A6DF)
            or (cp >= 0x2A700 and cp <= 0x2B73F)
            or (cp >= 0x2B740 and cp <= 0x2B81F)
            or (cp >= 0x2B820 and cp <= 0x2CEAF)
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)
        ):
            return True

        return False


class TextIteratorStreamer(TextStreamer):
    """
    Streamer that stores print-ready text in a queue, to be consumed by a downstream application as an iterator.
    """

    def __init__(
        self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
    ):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Put the new text in the queue. If the stream is ending, also put a stop signal in the queue."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
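
# A minimal usage sketch (illustrative, not part of this module): run `generate()`
# in a background thread and consume decoded text chunks from the streamer. The
# model/tokenizer names are placeholders.
#
#     from threading import Thread
#     from transformers import AutoModelForCausalLM, AutoTokenizer
#
#     tok = AutoTokenizer.from_pretrained("gpt2")
#     model = AutoModelForCausalLM.from_pretrained("gpt2")
#     inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
#     streamer = TextIteratorStreamer(tok)
#     thread = Thread(target=model.generate, kwargs=dict(inputs, streamer=streamer, max_new_tokens=20))
#     thread.start()
#     generated_text = "".join(chunk for chunk in streamer)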
| 53 |
import argparse
from collections import defaultdict
import yaml
lowerCAmelCase : Dict = """docs/source/en/_toctree.yml"""
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : Optional[int] = defaultdict(_A )
_lowerCAmelCase : Tuple = []
_lowerCAmelCase : Dict = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({'local': doc['local'], 'title': doc['title']} )
else:
new_doc_list.append(_A )
_lowerCAmelCase : str = new_doc_list
_lowerCAmelCase : List[Any] = [key for key, value in counts.items() if value > 1]
_lowerCAmelCase : List[str] = []
for duplicate_key in duplicates:
_lowerCAmelCase : Any = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key} )
if len(_A ) > 1:
raise ValueError(
f'{duplicate_key} is present several times in the documentation table of content at '
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1] )
_lowerCAmelCase : Any = sorted(_A , key=lambda _A : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(_A ) > 1:
raise ValueError('{doc_list} has two \'overview\' docs which is not allowed.' )
overview_doc.extend(_A )
# Sort
return overview_doc
def lowercase (_A=False ):
"""simple docstring"""
with open(_A , encoding='utf-8' ) as f:
_lowerCAmelCase : Union[str, Any] = yaml.safe_load(f.read() )
# Get to the API doc
_lowerCAmelCase : Tuple = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_lowerCAmelCase : str = content[api_idx]['sections']
# Then to the model doc
_lowerCAmelCase : Dict = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
_lowerCAmelCase : Optional[int] = api_doc[scheduler_idx]['sections']
_lowerCAmelCase : Any = clean_doc_toc(_A )
_lowerCAmelCase : int = False
if new_scheduler_doc != scheduler_doc:
_lowerCAmelCase : str = True
if overwrite:
_lowerCAmelCase : int = new_scheduler_doc
if diff:
if overwrite:
_lowerCAmelCase : Dict = api_doc
with open(_A , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(_A , allow_unicode=_A ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
def lowercase (_A=False ):
"""simple docstring"""
with open(_A , encoding='utf-8' ) as f:
_lowerCAmelCase : Optional[Any] = yaml.safe_load(f.read() )
# Get to the API doc
_lowerCAmelCase : Optional[Any] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_lowerCAmelCase : Union[str, Any] = content[api_idx]['sections']
# Then to the model doc
_lowerCAmelCase : Optional[int] = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
_lowerCAmelCase : List[Any] = False
_lowerCAmelCase : List[Any] = api_doc[pipeline_idx]['sections']
_lowerCAmelCase : Union[str, Any] = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
_lowerCAmelCase : Tuple = pipeline_doc['section']
_lowerCAmelCase : Optional[int] = clean_doc_toc(_A )
if overwrite:
_lowerCAmelCase : str = new_sub_pipeline_doc
new_pipeline_docs.append(_A )
# sort overall pipeline doc
_lowerCAmelCase : str = clean_doc_toc(_A )
if new_pipeline_docs != pipeline_docs:
_lowerCAmelCase : Any = True
if overwrite:
_lowerCAmelCase : Optional[int] = new_pipeline_docs
if diff:
if overwrite:
_lowerCAmelCase : str = api_doc
with open(_A , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(_A , allow_unicode=_A ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
lowerCAmelCase : List[Any] = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
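
# Typical invocations, assuming this script lives at `utils/check_doc_toc.py`
# (the path is an assumption based on PATH_TO_TOC above):
#   python utils/check_doc_toc.py                      # check only, raise on problems
#   python utils/check_doc_toc.py --fix_and_overwrite  # rewrite the ToC in place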
| 444 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 357 |
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )

        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)

    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}

        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels}
        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict


@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass


@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
        # fmt: off
        input_ids = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]], dtype=tf.int32)  # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0]  # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
| 357 | 1 |
def and_gate(input_1: int, input_2: int) -> int:
    """
    Calculate AND of the input values.

    >>> and_gate(0, 1)
    0
    >>> and_gate(1, 1)
    1
    """
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 356 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration


@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self):
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index("filename", es_client=es_client)

            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")


@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)


@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1


@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
| 356 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}


class ViTConfig(PretrainedConfig):
    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
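
# A small usage sketch (illustrative, not part of the upstream file): the ONNX
# export machinery reads these properties to build and validate the exported
# graph.
#
#     config = ViTConfig()
#     onnx_config = ViTOnnxConfig(config)
#     print(onnx_config.inputs)               # OrderedDict([('pixel_values', {...})])
#     print(onnx_config.atol_for_validation)  # 1e-4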
| 716 |
"""simple docstring"""
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)

if __name__ == "__main__":
    fire.Fire(save_len_file)
| 228 | 0 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class StableDiffusionPipelineOutput(BaseOutput):
    """
    Output class for Stable Diffusion pipelines.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_cycle_diffusion import CycleDiffusionPipeline
    from .pipeline_stable_diffusion import StableDiffusionPipeline
    from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
    from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
    from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
    from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
    from .pipeline_stable_diffusion_instruct_pix2pix import StableDiffusionInstructPix2PixPipeline
    from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
    from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline
    from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
    from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
    from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
    from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
    from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
    from .pipeline_stable_unclip import StableUnCLIPPipeline
    from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline
    from .safety_checker import StableDiffusionSafetyChecker
    from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer

try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
    from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline

try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionPix2PixZeroPipeline,
    )
else:
    from .pipeline_stable_diffusion_depth2img import StableDiffusionDepth2ImgPipeline
    from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
    from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline

try:
    if not (
        is_torch_available()
        and is_transformers_available()
        and is_k_diffusion_available()
        and is_k_diffusion_version(">=", "0.0.12")
    ):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import *  # noqa F403
else:
    from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline

try:
    if not (is_transformers_available() and is_onnx_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_onnx_objects import *  # noqa F403
else:
    from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
    from .pipeline_onnx_stable_diffusion_img2img import OnnxStableDiffusionImg2ImgPipeline
    from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
    from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
    from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline

if is_transformers_available() and is_flax_available():
    import flax

    @flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput):
        """
        Output class for Flax-based Stable Diffusion pipelines.
        """

        images: np.ndarray
        nsfw_content_detected: List[bool]

    from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
    from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
    from .pipeline_flax_stable_diffusion_img2img import FlaxStableDiffusionImg2ImgPipeline
    from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
    from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Finds the rightmost index of char in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """
        Finds the index in the text of the mismatched character when the pattern
        is anchored at current_pos, or -1 if the pattern matches there.
        """
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches the pattern in the text and returns the index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
print(positions)
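
# For the inputs above, "AB" occurs in "ABAABA" at indices 0 and 3, so this prints:
#   Pattern found in following positions:
#   [0, 3]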
| 371 | 0 |
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
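
# These entry points follow the torch.hub convention, so (assuming this file is
# the repository's hubconf.py) they can be loaded roughly as below; the model
# name is illustrative:
#
#     import torch
#     tokenizer = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
#     model = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")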
| 486 |
from __future__ import annotations
def mean(nums: list) -> float:
    """
    Find the mean of a list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 486 | 1 |
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    """
    Output class for Stable Diffusion pipelines.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]


if is_transformers_available() and is_torch_available():
    from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 345 |
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
SCREAMING_SNAKE_CASE_ : int = tmp_path / 'cache'
SCREAMING_SNAKE_CASE_ : Optional[int] = {'text': 'string'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : Dict = TextDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , keep_in_memory=SCREAMING_SNAKE_CASE ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
'features' , [
None,
{'text': 'string'},
{'text': 'int32'},
{'text': 'float32'},
] , )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
SCREAMING_SNAKE_CASE_ : Optional[int] = tmp_path / 'cache'
SCREAMING_SNAKE_CASE_ : List[str] = {'text': 'string'}
SCREAMING_SNAKE_CASE_ : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : List[str] = (
Features({feature: Value(SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : str = TextDatasetReader(SCREAMING_SNAKE_CASE , features=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
SCREAMING_SNAKE_CASE_ : Dict = tmp_path / 'cache'
SCREAMING_SNAKE_CASE_ : str = {'text': 'string'}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TextDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , split=SCREAMING_SNAKE_CASE ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
if issubclass(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = text_path
elif issubclass(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE_ : int = [text_path]
SCREAMING_SNAKE_CASE_ : List[Any] = tmp_path / 'cache'
SCREAMING_SNAKE_CASE_ : Optional[int] = {'text': 'string'}
SCREAMING_SNAKE_CASE_ : Optional[Any] = TextDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=("train",) ) -> Dict:
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for split in splits:
SCREAMING_SNAKE_CASE_ : List[Any] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
SCREAMING_SNAKE_CASE_ : Optional[Any] = tmp_path / 'cache'
SCREAMING_SNAKE_CASE_ : Tuple = {'text': 'string'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : int = TextDatasetReader({'train': text_path} , cache_dir=SCREAMING_SNAKE_CASE , keep_in_memory=SCREAMING_SNAKE_CASE ).read()
_check_text_datasetdict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
'features' , [
None,
{'text': 'string'},
{'text': 'int32'},
{'text': 'float32'},
] , )
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    # The text reader always yields a single "text" column with string dtype by default
    default_expected_features = {'text': 'string'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({'train': text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = 'train'
        path = {'train': text_path, 'test': text_path}
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
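# --- Illustrative usage sketch (added; not part of the test suite) ---
# `TextDatasetReader` is the reader exercised by the tests above; it lives in
# `datasets.io.text`. The file name below is an assumption for illustration only.
#
#   from datasets.io.text import TextDatasetReader
#   dataset = TextDatasetReader("my_corpus.txt", cache_dir="/tmp/cache").read()
#   assert dataset.column_names == ["text"]  # one string-typed "text" column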
| 345 | 1 |
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    '''simple docstring'''
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
def main() -> None:
    '''simple docstring'''
    n = int(input("""enter the numbers of values: """))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0
    print("""enter the values of parameters in a list: """)
    x = list(map(int, input().split()))
    print("""enter the values of corresponding parameters: """)
    for i in range(n):
        y[i][0] = float(input())
    value = int(input("""enter the value to interpolate: """))
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(F'''the value at {value} is {summ}''')
if __name__ == "__main__":
main()
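# --- Worked example (added; assumes the functions above) ---
# A non-interactive sketch of the same forward-difference scheme on the sample
# points x = [0, 1, 2, 3], y = x**2, interpolating at value = 1.5. The helper
# name `_example` is hypothetical.
def _example() -> None:
    x = [0, 1, 2, 3]
    n = len(x)
    y = [[float(xi**2)] + [0.0] * (n - 1) for xi in x]  # first column holds y-values
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    value = 1.5
    u = (value - x[0]) / (x[1] - x[0])
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(f"the value at {value} is {summ}")  # expect 2.25 for y = x**2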
| 720 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFMobileBertModel,
"fill-mask": TFMobileBertForMaskedLM,
"question-answering": TFMobileBertForQuestionAnswering,
"text-classification": TFMobileBertForSequenceClassification,
"token-classification": TFMobileBertForTokenClassification,
"zero-shot": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict
    class TFMobileBertModelTester(object):
        def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, embedding_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size
        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])
            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)
            config = MobileBertConfig(
                vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size)
            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        def create_and_check_mobilebert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            inputs = [input_ids, input_mask]
            result = model(inputs)
            result = model(input_ids)
            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
        def create_and_check_mobilebert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        def create_and_check_mobilebert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
        def create_and_check_mobilebert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
        def create_and_check_mobilebert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
        def create_and_check_mobilebert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
        def create_and_check_mobilebert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
        def create_and_check_mobilebert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)
    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)
    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("""google/mobilebert-uncased""")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
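# --- Illustrative usage sketch (added; not part of the test file) ---
# Loading the checkpoint verified above outside of the test harness; the input
# sentence is an assumption for illustration only.
#
#   from transformers import AutoTokenizer, TFMobileBertModel
#   tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
#   model = TFMobileBertModel.from_pretrained("google/mobilebert-uncased")
#   outputs = model(**tokenizer("Hello world", return_tensors="tf"))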
| 390 | 0 |
'''simple docstring'''
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    if density <= 0:
        raise ValueError('Impossible fluid density')
    if bulk_modulus <= 0:
        raise ValueError('Impossible bulk modulus')
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
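# --- Worked example (added) ---
# Water at ~20 °C: bulk modulus ≈ 2.15e9 Pa, density ≈ 998 kg/m^3, giving
# sqrt(2.15e9 / 998) ≈ 1467.7 m/s, in line with the textbook speed of sound
# in water. The material constants are approximate.
#
#   speed_of_sound_in_a_fluid(density=998.0, bulk_modulus=2.15e9)  # ≈ 1467.7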
| 309 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
},
'tokenizer_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
SPIECE_UNDERLINE = '▁'
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = '''left'''
    slow_tokenizer_class = XLNetTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], **kwargs):
        '''simple docstring'''
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs)
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''')
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 94 | 0 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
    """,
)
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError('Unsupported framework')
        return masked_index
    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                'fill-mask', self.model.base_model_prefix, f'No mask_token ({self.tokenizer.mask_token}) found on the input', )
    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input['input_ids'][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)
    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs
    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs['input_ids'] = model_inputs['input_ids']
        return model_outputs
    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs['input_ids'][0]
        outputs = model_outputs['logits']
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]
            outputs = outputs.numpy()
            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)
            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]
            values, predictions = probs.topk(top_k)
        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()
                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {'score': v, 'token': p, 'token_str': self.tokenizer.decode([p]), 'sequence': sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result
    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target, add_special_tokens=False, return_attention_mask=False, return_token_type_ids=False, max_length=1, truncation=True, )['input_ids']
                if len(input_ids) == 0:
                    logger.warning(
                        f'The specified target token `{target}` does not exist in the model vocabulary. '
                        'We cannot replace it with anything meaningful, ignoring it')
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f'The specified target token `{target}` does not exist in the model vocabulary. '
                    f'Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`.')
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError('At least one target must be provided when passed.')
        target_ids = np.array(target_ids)
        return target_ids
    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params['target_ids'] = target_ids
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                'fill-mask', self.model.base_model_prefix, 'The tokenizer does not define a `mask_token`.')
        return {}, {}, postprocess_params
    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, *args, **kwargs)
        if isinstance(inputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs
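# --- Illustrative usage sketch (added; not part of the pipeline module) ---
# The pipeline above is normally reached through `transformers.pipeline`; the
# model name below is an assumption for illustration only.
#
#   from transformers import pipeline
#   unmasker = pipeline("fill-mask", model="distilbert-base-uncased")
#   unmasker("Paris is the [MASK] of France.", targets=["capital"], top_k=1)
#   # -> [{"score": ..., "token": ..., "token_str": ..., "sequence": ...}]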
| 311 |
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)
class ParallelBackendConfig:
    backend_name = None
@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)
    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)
def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))
    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f'Error dividing inputs iterable among processes. '
            f'Total number of objects {len(iterable)}, '
            f'length: {sum(len(i[1]) for i in split_kwds)}')
    logger.info(
        f'Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}')
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f'Finished {num_proc} processes')
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f'Unpacked {len(mapped)} objects')
    return mapped
def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    # progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib,
    # and it requires monkey-patching joblib internal classes which is subject to change
    import joblib
    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable)
@experimental
@contextlib.contextmanager
def parallel_backend(backend_name: str):
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark
        register_spark()
        # TODO: call create_cache_and_write_probe if "download" in steps
        # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
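# --- Illustrative sketch of the contiguous split (added) ---
# The div/mod arithmetic in _map_with_multiprocessing_pool hands the first
# `mod` workers one extra item, e.g. 10 items over 3 processes -> [4, 3, 3]:
#
#   items, num_proc = list(range(10)), 3
#   div, mod = len(items) // num_proc, len(items) % num_proc
#   slices = [items[div * i + min(i, mod): div * (i + 1) + min(i + 1, mod)] for i in range(num_proc)]
#   assert [len(s) for s in slices] == [4, 3, 3]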
| 311 | 1 |
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data = Path(__file__).parent.joinpath(filename).read_text(encoding='utf-8')
    ciphertext = [int(number) for number in data.strip().split(',')]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(f'''{solution() = }''')
| 13 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class CTRLConfig(PretrainedConfig):
    model_type = 'ctrl'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__(self, vocab_size=246534, n_positions=256, n_embd=1280, dff=8192, n_layer=48, n_head=16, resid_pdrop=0.1, embd_pdrop=0.1, layer_norm_epsilon=1e-6, initializer_range=0.02, use_cache=True, **kwargs):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
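# --- Illustrative usage sketch (added; not part of the original module) ---
# Instantiating the configuration with its defaults (vocab_size=246534,
# n_embd=1280, n_layer=48, n_head=16), or with overrides for a smaller model:
#
#   config = CTRLConfig()
#   small_config = CTRLConfig(n_layer=6, n_head=8)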
| 75 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
    },
    '''monolingual_vocab_file''': {
        '''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''vinai/bartpho-syllable''': 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, monolingual_vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, 'r', encoding='utf-8') as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        state['sp_model_proto'] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id
    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]
    def convert_tokens_to_string(self, tokens):
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        out_monolingual_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'], )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, 'w', encoding='utf-8') as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f'''{str(token)} \n''')
        return out_vocab_file, out_monolingual_vocab_file
| 716 |
'''simple docstring'''
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3, ) -> int | None:
"""simple docstring"""
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus
    for _ in range(attempts):
# These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
'''num''',
type=int,
help='''The value to find a divisor of''',
)
parser.add_argument(
'''--attempts''',
type=int,
default=3,
help='''The number of attempts before giving up''',
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f'{args.num} is probably prime')
else:
        quotient = args.num // divisor
print(f'{args.num} = {divisor} * {quotient}')
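    # --- Worked example (added) ---
    # 8051 = 83 * 97 is a classic test case; which factor is returned depends on
    # the seed and step, so only membership is checked:
    #
    #   assert pollard_rho(8051) in (83, 97)
    #   assert pollard_rho(17) is None  # 17 is prime, so there is no nontrivial divisor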
| 434 | 0 |
"""simple docstring"""
from timeit import timeit
test_data = {
'''MALAYALAM''': True,
'''String''': False,
'''rotor''': True,
'''level''': True,
'''A''': True,
'''BB''': True,
'''ABC''': False,
'''amanaplanacanalpanama''': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True
def is_palindrome_traversal(s: str) -> bool:
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))
def is_palindrome_recursive(s: str) -> bool:
    if len(s) <= 2:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False
def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]
def benchmark_function(name: str) -> None:
    stmt = f"""all({name}(key) is value for key, value in test_data.items())"""
    setup = f"""from __main__ import test_data, {name}"""
    number = 500000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"""{name:<35} finished {number:,} runs in {result:.5f} seconds""")
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f"{key:21} {value}")
print('''a man a plan a canal panama''')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('''is_palindrome_slice''')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('''is_palindrome''')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('''is_palindrome_recursive''')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('''is_palindrome_traversal''')
| 255 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_blip''': [
'''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlipConfig''',
'''BlipTextConfig''',
'''BlipVisionConfig''',
],
'''processing_blip''': ['''BlipProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''image_processing_blip'''] = ['''BlipImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_blip'''] = [
'''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlipModel''',
'''BlipPreTrainedModel''',
'''BlipForConditionalGeneration''',
'''BlipForQuestionAnswering''',
'''BlipVisionModel''',
'''BlipTextModel''',
'''BlipForImageTextRetrieval''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_blip'''] = [
'''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBlipModel''',
'''TFBlipPreTrainedModel''',
'''TFBlipForConditionalGeneration''',
'''TFBlipForQuestionAnswering''',
'''TFBlipVisionModel''',
'''TFBlipTextModel''',
'''TFBlipForImageTextRetrieval''',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 255 | 1 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/config.json',
    'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/config.json',
}
class XLNetConfig(PretrainedConfig):
    model_type = '''xlnet'''
    keys_to_ignore_at_inference = ['''mems''']
    attribute_map = {
        '''n_token''': '''vocab_size''',  # Backward compatibility
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }
    def __init__(self, vocab_size=32000, d_model=1024, n_layer=24, n_head=16, d_inner=4096, ff_activation="gelu", untie_r=True, attn_type="bi", initializer_range=0.02, layer_norm_eps=1e-12, dropout=0.1, mem_len=512, reuse_len=None, use_mems_eval=True, use_mems_train=False, bi_data=False, clamp_len=-1, same_length=False, summary_type="last", summary_use_proj=True, summary_activation="tanh", summary_last_dropout=0.1, start_n_top=5, end_n_top=5, pad_token_id=5, bos_token_id=1, eos_token_id=2, **kwargs, ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(F"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    F"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})")
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                '''The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'''
                ''' instead.''', FutureWarning, )
            use_mems_eval = kwargs['''use_cache''']
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        """simple docstring"""
        logger.info(F"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        """simple docstring"""
        raise NotImplementedError(
            F"The model {self.model_type} is one of the few models that has no sequence length limit.")
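# --- Illustrative usage sketch (added) ---
# d_head is derived as d_model // n_head, so d_model must be divisible by n_head:
#
#   config = XLNetConfig(d_model=1024, n_head=16)   # config.d_head == 64
#   XLNetConfig(d_model=1000, n_head=16)            # raises ValueError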
| 343 |
'''simple docstring'''
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    '''simple docstring'''
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception('''Matrices are not 2x2''')
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    '''simple docstring'''
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    '''simple docstring'''
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def split_matrix(a: list) -> tuple[list, list, list, list]:
    '''simple docstring'''
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception('''Odd matrices are not supported!''')
    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix: list) -> tuple[int, int]:
    '''simple docstring'''
    return len(matrix), len(matrix[0])
def print_matrix(matrix: list) -> None:
    '''simple docstring'''
    print('''\n'''.join(str(line) for line in matrix))
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    '''simple docstring'''
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)
    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            'Unable to multiply these matrices, please check the dimensions.\n'
            f'Matrix A: {matrix1}\n'
            f'Matrix B: {matrix2}'
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)
    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]
    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)
    final_matrix = actual_strassen(new_matrix1, new_matrix2)
    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
| 343 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
        f''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'hubert.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key
                if key in name or (key.split('w2v_model.')[-1] == name.split('.')[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "weight" in name:
                        weight_type = 'weight'
                    elif "bias" in name:
                        weight_type = 'bias'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'''Unused weights: {unused_weights}''')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Copy/paste/tweak the fairseq checkpoint weights into the transformers design."""
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, 'vocab.json')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, 'w', encoding='utf-8') as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token='|', do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 85 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionPipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        second_prompt = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt='first prompt', image=second_prompt, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy', ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)
            generator = generator.manual_seed(0)
            new_image = pipe.dual_guided(
                prompt='first prompt', image=second_prompt, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy', ).images
        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt = 'cyberpunk 2077'
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy', ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy').images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
        image = pipe.image_variation(init_image, generator=generator, output_type='numpy').images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 328 | 0 |
'''simple docstring'''
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """Return the denominator d <= `digit` whose unit fraction numerator/d has
    the longest recurring cycle in its decimal expansion (Project Euler 26)."""
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator, digit + 1):
        has_been_divided = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
            now_divide = now_divide * 10 % divide_by_number
    return the_digit
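# Worked example: with digit=10 the answer is 7, since 1/7 = 0.(142857)
# has the longest recurring cycle (length 6) of any unit fraction 1/d, d < 10.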
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod() | 98 |
'''simple docstring'''
def is_arithmetic_series(series: list) -> bool:
    """Check whether a list of numbers forms an arithmetic series."""
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
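# Example: [2, 4, 6] -> True (common difference 2); [2, 4, 7] -> False.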
def arithmetic_mean(series: list) -> float:
    """Return the arithmetic mean of a non-empty list of numbers."""
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod() | 98 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(
        self,
        vocab_size=50_257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)
        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                'Configuration for convolutional module is incorrect. '
                'It is required that `len(config.attention_layers)` == `config.num_layers` '
                f'but is `len(config.attention_layers) = {len(self.attention_layers)}`, '
                f'`config.num_layers = {self.num_layers}`. '
                '`config.attention_layers` is prepared using `config.attention_types`. '
                'Please verify the value of `config.attention_types` argument.')
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
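    # Example: the default attention_types value [[["global", "local"], 12]]
    # expands to ["global", "local"] repeated 12 times, i.e. one entry per
    # layer for all 24 layers, alternating global and local attention.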
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]
    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode='floor') + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]
    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Custom implementation of the block-length computation to enable the export to ONNX."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode='floor')
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch

                batch, seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
| 298 | import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def read(dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
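# Each function above is wrapped by @get_duration (from the local utils module),
# which presumably times the call and returns the elapsed wall-clock seconds
# that benchmark_iterating() collects below.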
def benchmark_iterating():
    times = {'num examples': SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {'length': SMALL_TEST}),
        (read, {'length': SPEED_TEST_N_EXAMPLES}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_000}),
        (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
        (read_formatted, {'type': 'pandas', 'length': SMALL_TEST}),
        (read_formatted, {'type': 'torch', 'length': SMALL_TEST}),
        (read_formatted, {'type': 'tensorflow', 'length': SMALL_TEST}),
        (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}),
        (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1_000}),
    ]
    functions_shuffled = [
        (read, {'length': SMALL_TEST}),
        (read, {'length': SPEED_TEST_N_EXAMPLES}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_000}),
        (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
        (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}),
        (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print('generating dataset')
        features = datasets.Features(
            {'list': datasets.Sequence(datasets.Value('float32')), 'numbers': datasets.Value('float32')})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, 'dataset.arrow'), features, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={'list': (100,)}, )
        print('first set of iterations')
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + ' ' + ' '.join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
        print('shuffling dataset')
        dataset = dataset.shuffle()
        print('Second set of iterations (after shuffling)')
        for func, kwargs in functions_shuffled:
            print('shuffled ', func.__name__, str(kwargs))
            times['shuffled ' + func.__name__ + ' ' + ' '.join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
    with open(RESULTS_FILE_PATH, 'wb') as f:
        f.write(json.dumps(times).encode('utf-8'))
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 398 | 0 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError('CUDA out of memory.')


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
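# Background for the tests below: accelerate's find_executable_batch_size
# re-runs the decorated function, halving batch_size each time a CUDA
# out-of-memory error is raised, until the call succeeds or the size reaches
# zero. raise_fake_out_of_memory() lets the tests drive that loop without a GPU.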
class MemoryTest(unittest.TestCase):
    def test_base_case(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
    def test_base_case_with_args(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function('hello')
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, 'hello'])
    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn('No executable batch size found, reached zero.', cm.exception.args[0])
    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn('No executable batch size found, reached zero.', cm.exception.args[0])
    def test_batch_size_passed_in(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, 'hello', 'world')
        self.assertIn('Batch size was passed into `f`', cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])
    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError('Oops, we had an error!')

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn('Oops, we had an error!', cm.exception.args[0])
    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory) | 721 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'],
    'processing_layoutlmv2': ['LayoutLMv2Processor'],
    'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutlmv2_fast'] = ['LayoutLMv2TokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_layoutlmv2'] = ['LayoutLMv2FeatureExtractor']
    _import_structure['image_processing_layoutlmv2'] = ['LayoutLMv2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_layoutlmv2'] = [
        'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST',
        'LayoutLMv2ForQuestionAnswering',
        'LayoutLMv2ForSequenceClassification',
        'LayoutLMv2ForTokenClassification',
        'LayoutLMv2Layer',
        'LayoutLMv2Model',
        'LayoutLMv2PreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 234 | 0 |
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset, )
    else:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset, )
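# The dynamic_axes mappings passed in below mark dimensions (batch, sequence,
# height, width) as symbolic in the exported graph, so the ONNX models accept
# shapes other than the ones used for tracing at export time.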
@torch.no_grad()
def _A ( lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int , lowerCAmelCase_ : bool = False ):
"""simple docstring"""
lowerCAmelCase__ = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
lowerCAmelCase__ = "cuda"
elif fpaa and not torch.cuda.is_available():
raise ValueError("`float16` model export is only supported on GPUs with CUDA" )
else:
lowerCAmelCase__ = "cpu"
lowerCAmelCase__ = StableDiffusionPipeline.from_pretrained(lowerCAmelCase_ , torch_dtype=lowerCAmelCase_ ).to(lowerCAmelCase_ )
lowerCAmelCase__ = Path(lowerCAmelCase_ )
# TEXT ENCODER
lowerCAmelCase__ = pipeline.text_encoder.config.max_position_embeddings
lowerCAmelCase__ = pipeline.text_encoder.config.hidden_size
lowerCAmelCase__ = pipeline.tokenizer(
"A sample prompt" , padding="max_length" , max_length=pipeline.tokenizer.model_max_length , truncation=lowerCAmelCase_ , return_tensors="pt" , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=lowerCAmelCase_ , dtype=torch.intaa )) , output_path=output_path / "text_encoder" / "model.onnx" , ordered_input_names=["input_ids"] , output_names=["last_hidden_state", "pooler_output"] , dynamic_axes={
"input_ids": {0: "batch", 1: "sequence"},
} , opset=lowerCAmelCase_ , )
del pipeline.text_encoder
# UNET
lowerCAmelCase__ = pipeline.unet.config.in_channels
lowerCAmelCase__ = pipeline.unet.config.sample_size
lowerCAmelCase__ = output_path / "unet" / "model.onnx"
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ).to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ ),
torch.randn(2 ).to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ ),
torch.randn(2 , lowerCAmelCase_ , lowerCAmelCase_ ).to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ ),
False,
) , output_path=lowerCAmelCase_ , ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"] , output_names=["out_sample"] , dynamic_axes={
"sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
"timestep": {0: "batch"},
"encoder_hidden_states": {0: "batch", 1: "sequence"},
} , opset=lowerCAmelCase_ , use_external_data_format=lowerCAmelCase_ , )
lowerCAmelCase__ = str(unet_path.absolute().as_posix() )
lowerCAmelCase__ = os.path.dirname(lowerCAmelCase_ )
lowerCAmelCase__ = onnx.load(lowerCAmelCase_ )
# clean up existing tensor files
shutil.rmtree(lowerCAmelCase_ )
os.mkdir(lowerCAmelCase_ )
# collate external tensor files into one
onnx.save_model(
lowerCAmelCase_ , lowerCAmelCase_ , save_as_external_data=lowerCAmelCase_ , all_tensors_to_one_file=lowerCAmelCase_ , location="weights.pb" , convert_attribute=lowerCAmelCase_ , )
del pipeline.unet
# VAE ENCODER
lowerCAmelCase__ = pipeline.vae
lowerCAmelCase__ = vae_encoder.config.in_channels
lowerCAmelCase__ = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
lowerCAmelCase__ = lambda lowerCAmelCase_ , lowerCAmelCase_ : vae_encoder.encode(lowerCAmelCase_ , lowerCAmelCase_ )[0].sample()
onnx_export(
lowerCAmelCase_ , model_args=(
torch.randn(1 , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ).to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ ),
False,
) , output_path=output_path / "vae_encoder" / "model.onnx" , ordered_input_names=["sample", "return_dict"] , output_names=["latent_sample"] , dynamic_axes={
"sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
} , opset=lowerCAmelCase_ , )
# VAE DECODER
lowerCAmelCase__ = pipeline.vae
lowerCAmelCase__ = vae_decoder.config.latent_channels
lowerCAmelCase__ = vae_decoder.config.out_channels
# forward only through the decoder part
lowerCAmelCase__ = vae_encoder.decode
onnx_export(
lowerCAmelCase_ , model_args=(
torch.randn(1 , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ).to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ ),
False,
) , output_path=output_path / "vae_decoder" / "model.onnx" , ordered_input_names=["latent_sample", "return_dict"] , output_names=["sample"] , dynamic_axes={
"latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
} , opset=lowerCAmelCase_ , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
lowerCAmelCase__ = pipeline.safety_checker
lowerCAmelCase__ = safety_checker.config.vision_config.num_channels
lowerCAmelCase__ = safety_checker.config.vision_config.image_size
lowerCAmelCase__ = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ).to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ ),
torch.randn(1 , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ).to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ ),
) , output_path=output_path / "safety_checker" / "model.onnx" , ordered_input_names=["clip_input", "images"] , output_names=["out_images", "has_nsfw_concepts"] , dynamic_axes={
"clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
"images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
} , opset=lowerCAmelCase_ , )
del pipeline.safety_checker
lowerCAmelCase__ = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker" )
lowerCAmelCase__ = pipeline.feature_extractor
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder" ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder" ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder" ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / "unet" ) , scheduler=pipeline.scheduler , safety_checker=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(lowerCAmelCase_ )
print("ONNX pipeline saved to" , lowerCAmelCase_ )
del pipeline
del onnx_pipeline
lowerCAmelCase__ = OnnxStableDiffusionPipeline.from_pretrained(lowerCAmelCase_ , provider="CPUExecutionProvider" )
print("ONNX pipeline is loadable" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=14,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
    args = parser.parse_args()

    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
| 61 |
'''simple docstring'''
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
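# Subtracting the row-wise max before exponentiating keeps exp() from
# overflowing; the result is mathematically identical to the plain softmax.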
class ClassificationFunction(ExplicitEnum):
    SIGMOID = 'sigmoid'
    SOFTMAX = 'softmax'
    NONE = 'none'
@add_end_docstrings(
    PIPELINE_INIT_ARGS, r"""
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `\"default\"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `\"sigmoid\"`: Applies the sigmoid function on the output.
- `\"softmax\"`: Applies the softmax function on the output.
- `\"none\"`: Does not apply any function on the output.
""" , )
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == 'tf'
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING)
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k='', **tokenizer_kwargs):
        # Using "" as default argument because we're going to use `top_k=None` in user code to declare
        # "No top_k"
        preprocess_params = tokenizer_kwargs
        postprocess_params = {}
        if hasattr(self.model.config, 'return_all_scores') and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores
        if isinstance(top_k, int) or top_k is None:
            postprocess_params['top_k'] = top_k
            postprocess_params['_legacy'] = False
        elif return_all_scores is not None:
            warnings.warn(
                '`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of'
                ' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.', UserWarning, )
            if return_all_scores:
                postprocess_params['top_k'] = None
            else:
                postprocess_params['top_k'] = 1
        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]
        if function_to_apply is not None:
            postprocess_params['function_to_apply'] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = 'top_k' not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.')
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        # `_legacy` is used to determine if we're running the naked pipeline and in backward
        # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
        # the more natural result containing the list.
        # Default value before `set_parameters`
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, 'function_to_apply') and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE
        outputs = model_outputs['logits'][0]
        outputs = outputs.numpy()
        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f'Unrecognized `function_to_apply` argument: {function_to_apply}')
        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}
        dict_scores = [
            {'label': self.model.config.id2label[i], 'score': score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x['score'], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
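# Usage sketch (standard transformers API, shown for illustration):
#   from transformers import pipeline
#   classifier = pipeline('text-classification')
#   classifier('This movie was great!')              # [{'label': ..., 'score': ...}]
#   classifier('This movie was great!', top_k=None)  # scores for every label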
| 126 | 0 |
'''simple docstring'''
from collections import deque
class Process:
    def __init__(self, process_name, arrival_time, burst_time):
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    def __init__(self, number_of_queues, time_slices, queue, current_time):
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue(self):
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue):
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue):
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue):
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue):
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process):
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue):
        finished: deque[Process] = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin(self, ready_queue, time_slice):
        finished: deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self):
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
import doctest
    P1 = Process('P1', 0, 53)
    P2 = Process('P2', 0, 17)
    P3 = Process('P3', 0, 68)
    P4 = Process('P4', 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)
    doctest.testmod(extraglobs={'queue': deque([P1, P2, P3, P4])})
    P1 = Process('P1', 0, 53)
    P2 = Process('P2', 0, 17)
    P3 = Process('P3', 0, 68)
    P4 = Process('P4', 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()
    # print total waiting times of processes(P1, P2, P3, P4)
    print(
        f"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print completion times of processes(P1, P2, P3, P4)
    print(
        f"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(
        f"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print sequence of finished processes
    print(
        f"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
    )
| 719 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/reformer-crime-and-punishment': (
'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
)
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/reformer-crime-and-punishment': 52_4288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, eos_token='</s>', unk_token='<unk>', additional_special_tokens=[], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token, unk_token=unk_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 449 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'''
),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaOnnxConfig( OnnxConfig ):
    """simple docstring"""
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 682 |
"""simple docstring"""
import os
import pytest
from attr import dataclass
a__ : int = '''us-east-1''' # defaults region
@dataclass
class SageMakerTestEnvironment:
    """simple docstring"""

    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}
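    # The regexes below let SageMaker scrape metric values out of the training logs.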
@property
    def metric_definitions( self ) -> str:
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
    def base_job_name( self ) -> str:
return F"""{self.framework}-transfromers-test"""
@property
    def test_path( self ) -> str:
return F"""./tests/sagemaker/scripts/{self.framework}"""
@property
    def image_uri( self ) -> str:
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class" )
def sm_env(request ):
    '''simple docstring'''
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework )
| 682 | 1 |
from math import sqrt
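# Project Euler #7 style solution: return the n-th prime number (the 10001st by default).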
def is_prime( number: int ) -> bool:
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution( nth: int = 1_0_0_0_1 ) -> int:
    '''simple docstring'''
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number ):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number ):
            count += 1
    return number
if __name__ == "__main__":
print(F"""{solution() = }""")
| 712 |
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(""".""")
def get_module_path( test_file ) -> Union[str, Any]:
    '''simple docstring'''
    components = test_file.split(os.path.sep )
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            '''`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '''
            F"{test_file} instead." )
    test_fn = components[-1]
    if not test_fn.endswith('''py''' ):
        raise ValueError(F"`test_file` should be a python file. Got {test_fn} instead." )
    if not test_fn.startswith('''test_modeling_''' ):
        raise ValueError(
            F"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead." )

    components = components[:-1] + [test_fn.replace('''.py''' , '''''' )]
    test_module_path = '''.'''.join(components )
    return test_module_path
def get_test_module( test_file ) -> Any:
    '''simple docstring'''
    test_module_path = get_module_path(test_file )
    test_module = importlib.import_module(test_module_path )
    return test_module
def get_tester_classes( test_file ) -> List[str]:
    '''simple docstring'''
    tester_classes = []
    test_module = get_test_module(test_file )
    for attr in dir(test_module ):
        if attr.endswith('''ModelTester''' ):
            tester_classes.append(getattr(test_module , attr ) )

    # sort with class names
    return sorted(tester_classes , key=lambda x : x.__name__ )
def get_test_classes( test_file ) -> Any:
    '''simple docstring'''
    test_classes = []
    test_module = get_test_module(test_file )
    for attr in dir(test_module ):
        attr_value = getattr(test_module , attr )
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value , '''all_model_classes''' , [] )
        if len(model_classes ) > 0:
            test_classes.append(attr_value )

    # sort with class names
    return sorted(test_classes , key=lambda x : x.__name__ )
def get_model_classes( test_file ) -> str:
    '''simple docstring'''
    test_classes = get_test_classes(test_file )
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes )

    # sort with class names
    return sorted(model_classes , key=lambda x : x.__name__ )
def get_model_tester_from_test_class( test_class ) -> List[str]:
    '''simple docstring'''
    test = test_class()
    if hasattr(test , '''setUp''' ):
        test.setUp()

    model_tester = None
    if hasattr(test , '''model_tester''' ):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester
def get_test_classes_for_model( test_file , model_class ) -> Dict:
    '''simple docstring'''
    test_classes = get_test_classes(test_file )

    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class )

    # sort with class names
    return sorted(target_test_classes , key=lambda x : x.__name__ )
def get_tester_classes_for_model( test_file , model_class ) -> Union[str, Any]:
    '''simple docstring'''
    test_classes = get_test_classes_for_model(test_file , model_class )

    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class )
        if tester_class is not None:
            tester_classes.append(tester_class )

    # sort with class names
    return sorted(tester_classes , key=lambda x : x.__name__ )
def get_test_to_tester_mapping( test_file ) -> Optional[Any]:
    '''simple docstring'''
    test_classes = get_test_classes(test_file )
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class ) for test_class in test_classes}
    return test_tester_mapping
def get_model_to_test_mapping( test_file ) -> Optional[Any]:
    '''simple docstring'''
    model_classes = get_model_classes(test_file )
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file , model_class ) for model_class in model_classes
    }
    return model_test_mapping
def get_model_to_tester_mapping( test_file ) -> Optional[int]:
    '''simple docstring'''
    model_classes = get_model_classes(test_file )
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file , model_class ) for model_class in model_classes
    }
    return model_to_tester_mapping
def to_json( o ) -> Tuple:
    '''simple docstring'''
    if isinstance(o , str ):
        return o
    elif isinstance(o , type ):
        return o.__name__
    elif isinstance(o , (list, tuple) ):
        return [to_json(x ) for x in o]
    elif isinstance(o , dict ):
        return {to_json(k ): to_json(v ) for k, v in o.items()}
    else:
        return o
| 341 | 0 |
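# The routine below appears to implement Neville's iterated polynomial interpolation;
# the function and variable names were reconstructed on that assumption.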
def neville_interpolate( x_points , y_points , xa ):
    n = len(x_points )
    q = [[0] * n for i in range(n )]
    for i in range(n ):
        q[i][1] = y_points[i]

    for i in range(2 , n ):
        for j in range(i , n ):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
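# Sanity check on the line y = x + 5: neville_interpolate([1, 2, 3, 4], [6, 7, 8, 9], 5)[0] == 10.0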
if __name__ == "__main__":
import doctest
doctest.testmod()
| 415 | def gray_code( bit_count ):
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError('''The given input must be positive''' )

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count )

    # convert them to integers
    for i in range(len(sequence ) ):
        sequence[i] = int(sequence[i] , 2 )

    return sequence
def gray_code_sequence_string( bit_count ):
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n = 1
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1 )

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2 ):
        generated_no = '''0''' + smaller_sequence[i]
        sequence.append(generated_no )

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2 ) ):
        generated_no = '''1''' + smaller_sequence[i]
        sequence.append(generated_no )

    return sequence
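# e.g. gray_code(2) -> [0, 1, 3, 2]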
if __name__ == "__main__":
import doctest
doctest.testmod()
| 415 | 1 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
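# Conflict-style markers below wrap converted lines that still need manual review.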
lowerCamelCase : Dict = """<<<<<<< This should probably be modified because it mentions: """
lowerCamelCase : Tuple = """=======
>>>>>>>
"""
TO_HIGHLIGHT = [
"""TextEncoderConfig""",
"""ByteTextEncoder""",
"""SubwordTextEncoder""",
"""encoder_config""",
"""maybe_build_from_corpus""",
"""manual_dir""",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r"""tfds\.core""", r"""datasets"""),
(r"""tf\.io\.gfile\.GFile""", r"""open"""),
(r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""),
(r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""),
(r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""),
(r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""),
(r"""tfds\.features\.FeaturesDict\(""", r"""dict("""),
(r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
(r"""tfds\.""", r"""datasets."""),
(r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""),
(r"""self\.builder_config""", r"""self.config"""),
]
def convert_command_factory( args ):
    return ConvertCommand(args.tfds_path , args.datasets_directory )


class ConvertCommand( BaseDatasetsCLICommand ):
@staticmethod
    def register_subcommand( parser: ArgumentParser ):
        train_parser = parser.add_parser(
            'convert' , help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' , )
        train_parser.add_argument(
            '--tfds_path' , type=str , required=True , help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' , )
        train_parser.add_argument(
            '--datasets_directory' , type=str , required=True , help='Path to the HuggingFace Datasets folder.' )
        train_parser.set_defaults(func=convert_command_factory )
    def __init__( self , tfds_path: str , datasets_directory: str , *args ):
        self._logger = get_logger('datasets-cli/converting' )

        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run( self ):
        if os.path.isdir(self._tfds_path ):
            abs_tfds_path = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            abs_tfds_path = os.path.dirname(self._tfds_path )
        else:
            raise ValueError('--tfds_path is neither a directory nor a file. Please check path.' )

        abs_datasets_path = os.path.abspath(self._datasets_directory )

        self._logger.info(F'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path ):
            file_names = os.listdir(abs_tfds_path )
        else:
            file_names = [os.path.basename(self._tfds_path )]

        for f_name in file_names:
            self._logger.info(F'''Looking at file {f_name}''' )
            input_file = os.path.join(abs_tfds_path , f_name )
            output_file = os.path.join(abs_datasets_path , f_name )

            if not os.path.isfile(input_file ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info('Skipping file' )
                continue
            with open(input_file , encoding='utf-8' ) as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = 'import datasets\n'
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ''
                    continue
                elif "from absl import logging" in out_line:
                    out_line = 'from datasets import logging\n'
                elif "getLogger" in out_line:
                    out_line = out_line.replace('getLogger' , 'get_logger' )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    needs_manual_update = True
                    to_highlight = list(filter(lambda e : e in out_line , TO_HIGHLIGHT ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_highlight ) + '\n' )
                    out_lines.append(out_line )
                    out_lines.append(HIGHLIGHT_MESSAGE_POST )
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern , replacement , out_line )

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r'from\stensorflow_datasets.*import\s([^\.\r\n]+)' , out_line )
                    tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(',' ) )
                    out_line = 'from . import ' + match.group(1 )
                # Check we have not forgotten anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(F'''Error converting {out_line.strip()}''' )

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line )

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dataset_name = f_name.replace('.py' , '' )
                output_dir = os.path.join(abs_datasets_path , dataset_name )
                output_file = os.path.join(output_dir , f_name )
                os.makedirs(output_dir , exist_ok=True )
                self._logger.info(F'''Adding directory {output_dir}''' )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file )

            if needs_manual_update:
                with_manual_update.append(output_file )

            with open(output_file , 'w' , encoding='utf-8' ) as f:
                f.writelines(out_lines )
            self._logger.info(F'''Converted in {output_file}''' )

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file )
                dest_folder = imports_to_builder_map[f_name.replace('.py' , '' )]
                self._logger.info(F'''Moving {dest_folder} to {utils_file}''' )
                shutil.copy(utils_file , dest_folder )
            except KeyError:
                self._logger.error(F'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
| 706 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
lowerCamelCase : List[Any] = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class XmodConfig( PretrainedConfig ):
    model_type = 'xmod'
    def __init__( self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , pre_norm=False , adapter_reduction_factor=2 , adapter_layer_norm=False , adapter_reuse_layer_norm=True , ln_before_adapter=True , languages=("en_XX",) , default_language=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages )
        self.default_language = default_language
class XmodOnnxConfig( OnnxConfig ):
@property
    def inputs( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 303 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
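# Placeholder ("dummy") classes whose original names were elided in this dump; each one
# raises a helpful error when torch/transformers/onnx are not installed.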
class lowerCamelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''torch''', '''transformers''', '''onnx''']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )


class lowerCamelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''torch''', '''transformers''', '''onnx''']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )


class lowerCamelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''torch''', '''transformers''', '''onnx''']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )


class lowerCamelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''torch''', '''transformers''', '''onnx''']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )


class lowerCamelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''torch''', '''transformers''', '''onnx''']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )


class lowerCamelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''torch''', '''transformers''', '''onnx''']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
| 390 | import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ : Optional[int] = logging.get_logger(__name__)
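# Convert a metaseq/fairseq OPT checkpoint into the Hugging Face `OPTModel` format.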
def load_checkpoint( checkpoint_path ):
    sd = torch.load(checkpoint_path , map_location='''cpu''' )
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']

    # pop unnecessary weights
    keys_to_delete = [
        '''decoder.version''',
        '''decoder.output_projection.weight''',
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key )

    keys_to_rename = {
        '''decoder.project_in_dim.weight''': '''decoder.project_in.weight''',
        '''decoder.project_out_dim.weight''': '''decoder.project_out.weight''',
        '''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''',
        '''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''',
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key )

    keys = list(sd.keys() )
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace('''.qkv_proj.''' , '''.q_proj.''' )
            k_name = key.replace('''.qkv_proj.''' , '''.k_proj.''' )
            v_name = key.replace('''.qkv_proj.''' , '''.v_proj.''' )

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has the QKV weight separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k , v , q = torch.split(value , depth // 3 , dim=0 )

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
@torch.no_grad()
def convert_opt_checkpoint( checkpoint_path , pytorch_dump_folder_path , config=None ):
    sd = load_checkpoint(checkpoint_path )

    if config is not None:
        config = OPTConfig.from_pretrained(config )
    else:
        config = OPTConfig()

    model = OPTModel(config ).half().eval()
    model.load_state_dict(sd )

    # Check results
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 64 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
        super().setUp()

        vocab = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__']
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )

        merges = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', '']
        self.special_tokens_map = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'}

        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = 'adapt act apte'
        output_text = 'adapt act apte'
        return input_text, output_text
    def test_full_blenderbot_small_tokenizer( self ):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = 'adapt act apte'
        bpe_tokens = ['adapt', 'act', 'ap@@', 'te']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_special_tokens_small_tok( self ):
        tok = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
        assert tok('sam' ).input_ids == [1_384]
        src_text = 'I am a small frog.'
        encoded = tok([src_text] , padding=False , truncation=True )['input_ids']
        decoded = tok.batch_decode(encoded , skip_special_tokens=True , clean_up_tokenization_spaces=True )[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."
    def test_empty_word_small_tok( self ):
        tok = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
        src_text = 'I am a small frog .'
        src_text_dot = '.'
        encoded = tok(src_text )['input_ids']
        encoded_dot = tok(src_text_dot )['input_ids']

        assert encoded[-1] == encoded_dot[0]
| 91 |
"""simple docstring"""
from datetime import datetime as dt
import os
from github import Github
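# Issues carrying any of these labels are exempt from the stale-bot below.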
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def main() -> None:
"""simple docstring"""
    g = Github(os.environ['GITHUB_TOKEN'] )
    repo = g.get_repo('huggingface/transformers' )
    open_issues = repo.get_issues(state='open' )

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='closed' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
| 91 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
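# Hub locations of the byte-level BPE vocab and merge files for each public GPT-2 checkpoint.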
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/vocab.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/vocab.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/vocab.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/vocab.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/vocab.json''',
},
'''merges_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/merges.txt''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/merges.txt''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/merges.txt''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/merges.txt''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/tokenizer.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/tokenizer.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''gpt2''': 10_24,
'''gpt2-medium''': 10_24,
'''gpt2-large''': 10_24,
'''gpt2-xl''': 10_24,
'''distilgpt2''': 10_24,
}
class GPTaTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = GPTaTokenizer
    def __init__(self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )

        self.add_bos_token = kwargs.pop("""add_bos_token""" , False )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("""type""" ) )
            pre_tok_state["""add_prefix_space"""] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )

        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get("""is_split_into_words""" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus(self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get("""is_split_into_words""" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary(self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def _build_conversation_input_ids(self , conversation ) -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )

        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 157 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__a : Dict = logging.getLogger(__name__)
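# Seq2seq-specific extension of `TrainingArguments` adding generation-aware evaluation knobs.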
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments( TrainingArguments ):
    """simple docstring"""
    sortish_sampler : bool = field(default=False , metadata={"""help""": """Whether to use SortishSampler or not."""} )
    predict_with_generate : bool = field(
        default=False , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
    generation_max_length : Optional[int] = field(
        default=None , metadata={
            """help""": (
                """The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default """
                """to the `max_length` value of the model configuration."""
            )
        } , )
    generation_num_beams : Optional[int] = field(
        default=None , metadata={
            """help""": (
                """The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default """
                """to the `num_beams` value of the model configuration."""
            )
        } , )
    generation_config : Optional[Union[str, Path, GenerationConfig]] = field(
        default=None , metadata={
            """help""": """Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."""
        } , )
    def to_dict( self ) -> int:
        """simple docstring"""
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v , GenerationConfig ):
                d[k] = v.to_dict()
        return d | 397 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCamelCase = logging.get_logger(__name__)
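# Configuration for BiT (Big Transfer) backbones; the map below mirrors the Hub locations.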
_lowerCamelCase = {
'''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}
class BitConfig( BackboneConfigMixin , PretrainedConfig ):
    model_type = '''bit'''
    layer_types = ['''preactivation''', '''bottleneck''']
    supported_padding = ['''SAME''', '''VALID''']
    def __init__( self , num_channels=3 , embedding_size=6_4 , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , depths=[3, 4, 6, 3] , layer_type="preactivation" , hidden_act="relu" , global_padding=None , num_groups=3_2 , drop_path_rate=0.0 , embedding_dynamic_padding=False , output_stride=3_2 , width_factor=1 , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        if layer_type not in self.layer_types:
            raise ValueError(f'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f'''Padding strategy {global_padding} not supported''' )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor

        self.stage_names = ['''stem'''] + [f'''stage{idx}''' for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
| 401 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
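# The fast tests below build tiny randomly initialized components; the @slow tests load real checkpoints.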
class AltDiffusionImgaImgPipelineFastTests( unittest.TestCase ):
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image( self ):
        batch_size = 1
        num_channels = 3
        sizes = (3_2, 3_2)

        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
        return image
@property
    def dummy_cond_unet( self ):
        torch.manual_seed(0 )
        model = UNetaDConditionModel(
            block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
        return model
@property
    def dummy_vae( self ):
        torch.manual_seed(0 )
        model = AutoencoderKL(
            block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        return model
@property
    def dummy_text_encoder( self ):
        torch.manual_seed(0 )
        config = RobertaSeriesConfig(
            hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_6 , )
        return RobertaSeriesModelWithTransformation(config )
@property
    def dummy_extractor( self ):
        def extract(*args , **kwargs ):
            class Out:
                def __init__( self ):
                    self.pixel_values = torch.ones([0] )

                def to( self , device ):
                    self.pixel_values.to(device )
                    return self

            return Out()
return extract
    def test_stable_diffusion_img2img_default_case( self ):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
        tokenizer.model_max_length = 7_7

        init_image = self.dummy_image.to(device )
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImgaImgPipeline(
            unet=unet , scheduler=scheduler , vae=vae , text_encoder=bert , tokenizer=tokenizer , safety_checker=None , feature_extractor=self.dummy_extractor , )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=False )
        alt_pipe = alt_pipe.to(device )
        alt_pipe.set_progress_bar_config(disable=None )

        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = alt_pipe(
            [prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=init_image , )

        image = output.images

        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = alt_pipe(
            [prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=init_image , return_dict=False , )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 3_2, 3_2, 3)
        expected_slice = np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9] )
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
    def test_stable_diffusion_img2img_fp16( self ):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
        tokenizer.model_max_length = 7_7

        init_image = self.dummy_image.to(torch_device )

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImgaImgPipeline(
            unet=unet , scheduler=scheduler , vae=vae , text_encoder=bert , tokenizer=tokenizer , safety_checker=None , feature_extractor=self.dummy_extractor , )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=False )
        alt_pipe = alt_pipe.to(torch_device )
        alt_pipe.set_progress_bar_config(disable=None )

        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0 )
        image = alt_pipe(
            [prompt] , generator=generator , num_inference_steps=2 , output_type='''np''' , image=init_image , ).images

        assert image.shape == (1, 3_2, 3_2, 3)
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
    def test_stable_diffusion_img2img_pipeline_multiple_of_8( self ):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''' )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((7_6_0, 5_0_4) )

        model_id = '''BAAI/AltDiffusion'''
        pipe = AltDiffusionImgaImgPipeline.from_pretrained(
            model_id , safety_checker=None , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()

        prompt = '''A fantasy landscape, trending on artstation'''

        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.7_5 , guidance_scale=7.5 , generator=generator , output_type='''np''' , )
        image = output.images[0]

        image_slice = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]

        assert image.shape == (5_0_4, 7_6_0, 3)
        expected_slice = np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImgaImgPipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_img2img_pipeline_default( self ):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''' )
        init_image = init_image.resize((7_6_8, 5_1_2) )
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' )

        model_id = '''BAAI/AltDiffusion'''
        pipe = AltDiffusionImgaImgPipeline.from_pretrained(
            model_id , safety_checker=None , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()

        prompt = '''A fantasy landscape, trending on artstation'''

        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.7_5 , guidance_scale=7.5 , generator=generator , output_type='''np''' , )
        image = output.images[0]

        assert image.shape == (5_1_2, 7_6_8, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
| 401 | 1 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
_snake_case : Tuple = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments( TrainingArguments ):
    """simple docstring"""
    label_smoothing : Optional[float] = field(
        default=0.0 , metadata={"help": "The label smoothing epsilon to apply (if not zero)."} )
    sortish_sampler : bool = field(default=False , metadata={"help": "Whether to SortishSamler or not."} )
    predict_with_generate : bool = field(
        default=False , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
    adafactor : bool = field(default=False , metadata={"help": "whether to use adafactor"} )
    encoder_layerdrop : Optional[float] = field(
        default=None , metadata={"help": "Encoder layer dropout probability. Goes into model.config."} )
    decoder_layerdrop : Optional[float] = field(
        default=None , metadata={"help": "Decoder layer dropout probability. Goes into model.config."} )
    dropout : Optional[float] = field(default=None , metadata={"help": "Dropout probability. Goes into model.config."} )
    attention_dropout : Optional[float] = field(
        default=None , metadata={"help": "Attention dropout probability. Goes into model.config."} )
    lr_scheduler : Optional[str] = field(
        default="linear" , metadata={"help": f"""Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"""} , )
| 81 |
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
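# Helpers for loading VQGAN/latent-model checkpoints from OmegaConf configs
# (function names below were restored from their call sites where possible).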
def load_config( config_path , display=False ):
    """simple docstring"""
    config = OmegaConf.load(config_path )
    if display:
        print(yaml.dump(OmegaConf.to_container(config ) ) )
    return config
def load_vqgan( device , conf_path=None , ckpt_path=None ):
    """simple docstring"""
    if conf_path is None:
        conf_path = """./model_checkpoints/vqgan_only.yaml"""
    config = load_config(conf_path , display=False )
    model = VQModel(**config.model.params )
    if ckpt_path is None:
        ckpt_path = """./model_checkpoints/vqgan_only.pt"""
    sd = torch.load(ckpt_path , map_location=device )
    if ".ckpt" in ckpt_path:
        sd = sd["""state_dict"""]
    model.load_state_dict(sd , strict=True )
    model.to(device )
    del sd
    return model
def reconstruct_with_vqgan( x , model ):
    """simple docstring"""
    z , _ , _ = model.encode(x )
    print(F"""VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}""" )
    xrec = model.decode(z )
    return xrec
def get_obj_from_str( string , reload=False ):
    """simple docstring"""
    module_name , cls = string.rsplit(""".""" , 1 )
    if reload:
        module_imp = importlib.import_module(module_name )
        importlib.reload(module_imp )
    return getattr(importlib.import_module(module_name , package=None ) , cls )
def instantiate_from_config( config ):
    """simple docstring"""
    if "target" not in config:
        raise KeyError("""Expected key `target` to instantiate.""" )
    return get_obj_from_str(config["""target"""] )(**config.get("""params""" , {} ) )
def load_model_from_config( config , sd , gpu=True , eval_mode=True ):
    """simple docstring"""
    model = instantiate_from_config(config )
    if sd is not None:
        model.load_state_dict(sd )
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model( config , ckpt , gpu , eval_mode ):
    """simple docstring"""
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt , map_location="""cpu""" )
        global_step = pl_sd["""global_step"""]
        print(F"""loaded model from global step {global_step}.""" )
    else:
        pl_sd = {"""state_dict""": None}
        global_step = None
    model = load_model_from_config(config.model , pl_sd["""state_dict"""] , gpu=gpu , eval_mode=eval_mode )["""model"""]
    return model, global_step | 555 | 0 |
'''simple docstring'''
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    '''simple docstring'''
    def __init__( self , pos_x: int , pos_y: int , goal_x: int , goal_y: int , parent: Node | None ):
        '''simple docstring'''
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    '''simple docstring'''
    def __init__( self , start: tuple[int, int] , goal: tuple[int, int] ):
        '''simple docstring'''
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , None )

        self.node_queue = [self.start]
        self.reached = False
    def search( self ):
        '''simple docstring'''
        while self.node_queue:
            current_node = self.node_queue.pop(0 )

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node )

            successors = self.get_successors(current_node )

            for node in successors:
                self.node_queue.append(node )

        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors( self , parent: Node ):
        '''simple docstring'''
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent ) )
        return successors
    def retrace_path( self , node: Node | None ):
        '''simple docstring'''
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    '''simple docstring'''
    def __init__( self , start , goal ):
        '''simple docstring'''
        self.fwd_bfs = BreadthFirstSearch(start , goal )
        self.bwd_bfs = BreadthFirstSearch(goal , start )
        self.reached = False
    def search( self ):
        '''simple docstring'''
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0 )
            current_bwd_node = self.bwd_bfs.node_queue.pop(0 )

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node , current_bwd_node )

            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node ),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node ),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node )

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None
def _snake_case ( self : int , lowerCamelCase : Node , lowerCamelCase : Node ):
'''simple docstring'''
__lowercase = self.fwd_bfs.retrace_path(lowerCamelCase )
__lowercase = self.bwd_bfs.retrace_path(lowerCamelCase )
bwd_path.pop()
bwd_path.reverse()
__lowercase = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
snake_case__ : Dict = (0, 0)
snake_case__ : List[Any] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
snake_case__ : Any = time.time()
snake_case__ : str = BreadthFirstSearch(init, goal)
snake_case__ : int = bfs.search()
snake_case__ : List[str] = time.time() - start_bfs_time
print("""Unidirectional BFS computation time : """, bfs_time)
snake_case__ : List[Any] = time.time()
snake_case__ : Optional[Any] = BidirectionalBreadthFirstSearch(init, goal)
snake_case__ : Union[str, Any] = bd_bfs.search()
snake_case__ : Union[str, Any] = time.time() - start_bd_bfs_time
print("""Bidirectional BFS computation time : """, bd_bfs_time)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
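
# A minimal usage sketch (added for illustration): the defaults reproduce a
# Swin-Tiny layout, so the derived attributes resolve as below.
#
#   config = MaskFormerSwinConfig()
#   assert config.hidden_size == 768  # 96 * 2 ** (4 - 1)
#   assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]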
"""Check that the task-guide model lists stay in sync with the auto mappings."""

import argparse
import os

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"


def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    # Trim empty lines on both ends of the selected block.
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def get_model_list_for_task(task_guide):
    """Return the list of models supporting `task_guide`, formatted as Markdown doc links."""
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    """Check (and optionally rewrite) the auto-generated model list of one task guide."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
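
# Typical invocations, following the comment at the top of this script (run
# from the repository root):
#
#   python utils/check_task_guides.py                      # report inconsistencies
#   python utils/check_task_guides.py --fix_and_overwrite  # rewrite the guides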
"""Integration tests for the BetterTransformer conversion API."""

import tempfile
import unittest

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
    is_torch_available,
    require_optimum,
    require_torch,
    slow,
)


if is_torch_available():
    import torch


@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        """Checks that the conversion to BetterTransformer (and back) round-trips."""
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)

            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        """`save_pretrained` must raise while the model is in BetterTransformer mode."""
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
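
# A compact sketch of the API exercised by the tests above (checkpoint name is
# illustrative; both methods are real transformers/optimum entry points):
#
#   model = AutoModelForSeq2SeqLM.from_pretrained("t5-small").to_bettertransformer()
#   ...                                        # fast attention kernels for inference
#   model = model.reverse_bettertransformer()  # required before save_pretrained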
"""simple docstring"""
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
SCREAMING_SNAKE_CASE = """sshleifer/bart-tiny-random"""
SCREAMING_SNAKE_CASE = """patrickvonplaten/t5-tiny-random"""
@require_torch
class __a ( unittest.TestCase ):
@cached_property
def _SCREAMING_SNAKE_CASE ( self : int )-> List[str]:
"""simple docstring"""
return AutoConfig.from_pretrained(UpperCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : str )-> Optional[int]:
"""simple docstring"""
UpperCamelCase , *UpperCamelCase = create_student_by_copying_alternating_layers(UpperCAmelCase_ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def _SCREAMING_SNAKE_CASE ( self : Any )-> Union[str, Any]:
"""simple docstring"""
UpperCamelCase , *UpperCamelCase = create_student_by_copying_alternating_layers(UpperCAmelCase_ , tempfile.mkdtemp() , e=1 , d=UpperCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] )-> Any:
"""simple docstring"""
UpperCamelCase , *UpperCamelCase = create_student_by_copying_alternating_layers(UpperCAmelCase_ , tempfile.mkdtemp() , e=1 , d=UpperCAmelCase_ )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def _SCREAMING_SNAKE_CASE ( self : Dict )-> str:
"""simple docstring"""
UpperCamelCase , *UpperCamelCase = create_student_by_copying_alternating_layers(UpperCAmelCase_ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def _SCREAMING_SNAKE_CASE ( self : List[str] )-> Optional[Any]:
"""simple docstring"""
with self.assertRaises(UpperCAmelCase_ ):
create_student_by_copying_alternating_layers(UpperCAmelCase_ , tempfile.mkdtemp() , e=UpperCAmelCase_ , d=UpperCAmelCase_ )
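
# A hedged usage sketch for the helper under test: it appears to return the
# student model first, followed by the copied encoder and decoder layer indices.
#
#   student, *_ = create_student_by_copying_alternating_layers(
#       TINY_BART, tempfile.mkdtemp(), e=1, d=1
#   )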
"""simple docstring"""
from __future__ import annotations
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE = TypeVar("""T""")
class __a ( Generic[T] ):
def __init__( self : List[Any] , UpperCAmelCase_ : T )-> None:
"""simple docstring"""
UpperCamelCase = data
UpperCamelCase = self
UpperCamelCase = 0
class __a ( Generic[T] ):
def __init__( self : int )-> None:
"""simple docstring"""
# map from node name to the node object
UpperCamelCase = {}
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase_ : T )-> None:
"""simple docstring"""
# create a new set with x as its member
UpperCamelCase = DisjointSetTreeNode(UpperCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase_ : T )-> DisjointSetTreeNode[T]:
"""simple docstring"""
# find the set x belongs to (with path-compression)
UpperCamelCase = self.map[data]
if elem_ref != elem_ref.parent:
UpperCamelCase = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def _SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase_ : DisjointSetTreeNode[T] , UpperCAmelCase_ : DisjointSetTreeNode[T] )-> None:
"""simple docstring"""
# helper function for union operation
if nodea.rank > nodea.rank:
UpperCamelCase = nodea
else:
UpperCamelCase = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def _SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase_ : T , UpperCAmelCase_ : T )-> None:
"""simple docstring"""
# merge 2 disjoint sets
self.link(self.find_set(UpperCAmelCase_ ) , self.find_set(UpperCAmelCase_ ) )
class __a ( Generic[T] ):
def __init__( self : Dict )-> None:
"""simple docstring"""
# connections: map from the node to the neighbouring nodes (with weights)
UpperCamelCase = {}
def _SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase_ : T )-> None:
"""simple docstring"""
# add a node ONLY if its not present in the graph
if node not in self.connections:
UpperCamelCase = {}
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase_ : T , UpperCAmelCase_ : T , UpperCAmelCase_ : int )-> None:
"""simple docstring"""
# add an edge with the given weight
self.add_node(UpperCAmelCase_ )
self.add_node(UpperCAmelCase_ )
UpperCamelCase = weight
UpperCamelCase = weight
def _SCREAMING_SNAKE_CASE ( self : Tuple )-> GraphUndirectedWeighted[T]:
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
edges.sort(key=lambda UpperCAmelCase_ : x[2] )
# creating the disjoint set
UpperCamelCase = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(UpperCAmelCase_ )
# MST generation
UpperCamelCase = 0
UpperCamelCase = 0
UpperCamelCase = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
UpperCamelCase , UpperCamelCase , UpperCamelCase = edges[index]
index += 1
UpperCamelCase = disjoint_set.find_set(UpperCAmelCase_ )
UpperCamelCase = disjoint_set.find_set(UpperCAmelCase_ )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
disjoint_set.union(UpperCAmelCase_ , UpperCAmelCase_ )
return graph
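
# A small usage example (added for illustration): a weighted triangle whose
# minimum spanning tree keeps the two cheapest edges, for a total weight of 3.
if __name__ == "__main__":
    g = GraphUndirectedWeighted[int]()
    g.add_edge(1, 2, 1)
    g.add_edge(2, 3, 2)
    g.add_edge(1, 3, 10)
    mst = g.kruskal()
    print(mst.connections)  # the (1, 3) edge of weight 10 is dropped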
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """Solve the 2x2 system a1*x + b1*y = d1, a2*x + b2*y = d2 with Cramer's rule,
    where each equation is given as [a, b, d]."""
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        raise ValueError("No solution. (Inconsistent system)")
    if determinant_x == determinant_y == 0:
        # Trivial solution: x = 0, y = 0
        return (0.0, 0.0)
    x = determinant_x / determinant
    y = determinant_y / determinant
    # Non-Trivial Solution (Consistent system)
    return (x, y)
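
# A small usage example (added for illustration): solves x + 2y = 3 and
# 2x + y = 3, which has the unique solution (1.0, 1.0).
if __name__ == "__main__":
    print(cramers_rule_2x2([1, 2, 3], [2, 1, 3]))  # -> (1.0, 1.0)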
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # When `preprocess` sets `input_ids` to None and the pipeline batches inputs,
        # the batch becomes a list of Nones; normalize it back to a single None.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
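
# A hedged usage sketch via the high-level `pipeline` factory (the checkpoint
# and image URL below are public examples, used purely as an illustration):
#
#   from transformers import pipeline
#   captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
#   captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
#   # -> [{'generated_text': ...}]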
"""simple docstring"""
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
A , A = len(__a ), len(grid[0] )
if (
min(__a , __a ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
A = 0
count += depth_first_search(__a , row + 1 , __a , __a )
count += depth_first_search(__a , row - 1 , __a , __a )
count += depth_first_search(__a , __a , col + 1 , __a )
count += depth_first_search(__a , __a , col - 1 , __a )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
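
    # A small usage example (added for illustration): a 3x3 grid with the centre
    # blocked has exactly two simple paths from (0, 0) to (2, 2).
    maze = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
    print(depth_first_search(maze, 0, 0, set()))  # -> 2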
"""Convert MobileBERT checkpoint."""

import argparse

import torch

from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
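
# A hedged invocation sketch (the script filename and all paths are
# hypothetical):
#
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./mobilebert/model.ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./pytorch_model.bin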