from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""
_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted labels.
    references (`list` of `int`): Ground truth labels.
    labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
    pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `'binary'`. Defaults to 1.
    average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
        - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
        - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
        - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
        - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters 'macro' to account for label imbalance. This option can result in an F-score that is not between precision and recall.
        - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
    sample_weight (`list` of `float`): Sample weights. Defaults to None.

Returns:
    f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.

Examples:

    Example 1 - A simple binary example
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
        >>> print(results)
        {'f1': 0.5}

    Example 2 - The same simple binary example as in Example 1, but with `pos_label` set to `0`.
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
        >>> print(round(results['f1'], 2))
        0.67

    Example 3 - The same simple binary example as in Example 1, but with `sample_weight` included.
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
        >>> print(round(results['f1'], 2))
        0.35

    Example 4 - A multiclass example, with different values for the `average` input.
        >>> predictions = [0, 2, 1, 0, 0, 1]
        >>> references = [0, 1, 2, 0, 1, 2]
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
        >>> print(round(results['f1'], 2))
        0.27
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
        >>> print(round(results['f1'], 2))
        0.33
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
        >>> print(round(results['f1'], 2))
        0.27
        >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
        >>> print(results)
        {'f1': array([0.8, 0. , 0. ])}
"""
_CITATION = """
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class F1(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
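# A hedged sketch of the "multilabel" configuration branch handled by `_info` above
# (the config name follows the code; the label values are illustrative, not from the original file):
#
#   f1_metric = datasets.load_metric("f1", "multilabel")
#   results = f1_metric.compute(
#       references=[[0, 1], [1, 1]], predictions=[[0, 1], [1, 0]], average="macro"
#   )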
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
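# The block above is the standard lazy-import pattern: `_import_structure` maps each
# submodule to its public names, and `_LazyModule` only imports a submodule the first
# time one of its attributes is accessed. A minimal consumer-side sketch (illustrative):
#
#   from transformers.models.clip import CLIPModel  # triggers the import of modeling_clip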
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)
class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
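# Hedged usage sketch for the tokenizer above (the checkpoint name is illustrative):
#
#   tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#   inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")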
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"

BertAbsConfig = namedtuple(
"BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    """Copy/paste and tweak the authors' pre-trained weights into the transformers format."""
    # Instantiate the authors' model with the pre-trained weights.
    # NOTE: the boolean flag values below are assumptions based on the upstream conversion script.
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------
    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # -----------------------------------
    # Make sure the outputs are identical
    # -----------------------------------
    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between model outputs: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between generator outputs: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--bertabs_checkpoint_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model.",
)
    args = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
    )
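# Illustrative invocation (script name and paths are placeholders, not from the original):
#
#   python convert_bertabs_original_pytorch_checkpoint.py \
#       --bertabs_checkpoint_path /path/to/model_step_148000.pt \
#       --pytorch_dump_folder_path ./bertabs-cnndm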
'''simple docstring'''
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class MultiTPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
'''simple docstring'''
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
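    # Illustrative call (not part of the original script):
    #   create_ngram("hello", 3)  # -> ['hel', 'ell', 'llo']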
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
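# Shape walk-through for the function above with made-up sizes (num_splits=3, num_heads=2,
# hidden_size=4): a version >= 2.0 checkpoint stores param as [2 * 3 * 4, :] = [24, :];
# view -> (2, 3, 4, :); transpose(0, 1) -> (3, 2, 4, :); the final view flattens back to
# [24, :], now in the [num_splits * num_heads * hidden_size, :] layout expected downstream.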
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))

        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint",
        type=str,
        help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file",
        default="",
        type=str,
        help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
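# Illustrative invocation (the Megatron-LM clone location and checkpoint path are placeholders):
#
#   PYTHONPATH=/tmp/Megatron-LM python convert_megatron_gpt2_checkpoint.py \
#       --print-checkpoint-structure /path/to/megatron_gpt2_345m/checkpoint.zip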
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
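# Hedged wiring sketch for the trainer above (argument objects are assumed to come from
# the accompanying fine-tuning script; names are illustrative):
#
#   trainer = Seq2SeqTrainer(model=model, args=training_args, data_args=data_args,
#                            train_dataset=train_dataset, tokenizer=tokenizer)
#   trainer.train()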
'''simple docstring'''
class Graph:  # Public class to implement a graph
    def __init__(self, row, col, graph):
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i, j, visited):
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i, j, visited):
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self):  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
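# Illustrative usage (matrix values are made up); islands are 8-connected groups of 1s:
#
#   g = Graph(5, 5, [[1, 1, 0, 0, 0],
#                    [0, 1, 0, 0, 1],
#                    [1, 0, 0, 1, 1],
#                    [0, 0, 0, 0, 0],
#                    [1, 0, 1, 0, 1]])
#   g.count_islands()  # -> 5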
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to concatenate with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
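# Hedged usage sketch (the dataset objects are assumed to share features; values illustrative):
#
#   mixed = interleave_datasets([ds1, ds2], probabilities=[0.7, 0.3], seed=42,
#                               stopping_strategy="all_exhausted")
#   combined = concatenate_datasets([ds1, ds2])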
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
    blip_2,
bloom,
bridgetower,
    byt5,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
    convnextv2,
cpm,
cpmant,
ctrl,
cvt,
    data2vec,
deberta,
    deberta_v2,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
    gpt2,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
    gpt_sw3,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
    layoutlmv2,
    layoutlmv3,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
    longt5,
luke,
lxmert,
    m2m_100,
marian,
markuplm,
    mask2former,
maskformer,
mbart,
    mbart50,
mega,
megatron_bert,
    megatron_gpt2,
mgp_str,
mluke,
mobilebert,
    mobilenet_v1,
    mobilenet_v2,
mobilevit,
    mobilevitv2,
mpnet,
mra,
    mt5,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
    pix2struct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
    speech_to_text_2,
    speecht5,
splinter,
squeezebert,
swiftformer,
swin,
    swin2sr,
    swinv2,
switch_transformers,
    t5,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
    umt5,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
lowerCAmelCase__ : Tuple =getLogger(__name__)
def a__ ( A__ ):
return list(itertools.chain.from_iterable(A__ ) )
def a__ ( A__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = get_git_info()
save_json(A__, os.path.join(A__, 'git_log.json' ) )
def a__ ( A__, A__, A__=4, **A__ ):
with open(A__, 'w' ) as f:
json.dump(A__, A__, indent=A__, **A__ )
def a__ ( A__ ):
with open(A__ ) as f:
return json.load(A__ )
def a__ ( ):
SCREAMING_SNAKE_CASE_ : List[str] = git.Repo(search_parent_directories=A__ )
SCREAMING_SNAKE_CASE_ : Any = {
'repo_id': str(A__ ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
'hostname': str(socket.gethostname() ),
}
return repo_infos
def a__ ( A__, A__ ):
return list(map(A__, A__ ) )
def a__ ( A__, A__ ):
with open(A__, 'wb' ) as f:
return pickle.dump(A__, A__ )
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
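

# Illustrative sanity check for the helpers above (not part of the original
# utilities): "the big cat" vs "a big dog" normalizes to "big cat" / "big dog",
# shares one of two tokens, so precision = recall = 0.5 and F1 = 0.5.
def _demo_f1_score():
    assert normalize_answer("The Cat!") == "cat"
    assert abs(f1_score("the big cat", "a big dog") - 0.5) < 1e-9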
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns, reference_lns):
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")
def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
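

# Hedged usage sketch for set_extra_model_params (the SimpleNamespace stand-ins
# are hypothetical; real callers pass argparse hparams and a PretrainedConfig):
def _demo_set_extra_model_params():
    from types import SimpleNamespace

    hparams = SimpleNamespace(dropout=0.1)
    config = SimpleNamespace(dropout_rate=0.0)
    hparams, config = set_extra_model_params(["dropout"], hparams, config)
    # the value lands on the T5-style `dropout_rate` and is removed from hparams
    assert config.dropout_rate == 0.1 and not hasattr(hparams, "dropout")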
| 101 | 1 |
'''simple docstring'''
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    """simple docstring"""

    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
| 609 |
'''simple docstring'''
from collections.abc import Callable
class Heap:
    """A generic heap; pass a key function to control the ordering."""

    def __init__(self, key: Callable | None = None):
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i):
        """Returns parent index of given index if it exists, else None"""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i):
        """Returns left-child index of given index if it exists, else None"""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i):
        """Returns right-child index of given index if it exists, else None"""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i, j):
        """Performs changes required for swapping two elements in the heap"""
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i, j):
        """Compares the two items using default comparison"""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i):
        """Returns index of a valid parent as per desired ordering among the
        given index and both of its children"""
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index):
        """Fixes the heap in the upward direction of the given index"""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index):
        """Fixes the heap in the downward direction of the given index"""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item, item_value):
        """Updates the given item's value in the heap if present"""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item):
        """Deletes the given item from the heap if present"""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        # Move the last item into the vacated slot and keep the index map in sync.
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item, item_value):
        """Inserts the given item with the given value into the heap"""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self):
        """Returns the top item pair ([item, calculated value]) if present"""
        return self.arr[0] if self.size else None

    def extract_top(self):
        """Returns the top item pair and removes it from the heap if present"""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple
def lowercase ( ):
'''simple docstring'''
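

# Illustrative usage of the Heap above (not in the original module): the default
# key yields a max-heap on the item value; pass key=lambda x: -x for a min-heap.
def _demo_heap():
    heap = Heap()
    for item in [2, 7, 1, 9, 4]:
        heap.insert_item(item, item)
    assert heap.get_top() == [9, 9]
    assert heap.extract_top() == [9, 9]
    assert heap.get_top() == [7, 7]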
if __name__ == "__main__":
import doctest
doctest.testmod()
| 609 | 1 |
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
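

# Minimal illustration of set_param above (shapes are assumptions, not model
# weights): copying a (4, 3) weight and a (4,) bias into nn.Linear(3, 4).
def _demo_set_param():
    layer = nn.Linear(3, 4)
    weight = torch.randn(4, 3)
    bias = torch.randn(4)
    set_param(layer, weight, bias)
    assert torch.equal(layer.weight.data, weight)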
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained Reformer model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_lowercase = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 5 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    """simple docstring"""

    def __init__(self, *args, **kwargs) -> None:
        """simple docstring"""
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
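

# Sketch (not part of the original file): instantiating the deprecated shim
# should surface the FutureWarning declared above.
def _demo_deprecation_warning():
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        GLPNFeatureExtractor()
        assert any(issubclass(w.category, FutureWarning) for w in caught)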
| 560 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"deit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"deit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"deit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"deit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"deit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"deit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"deit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"deit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"deit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"deit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''deit.embeddings.cls_token'''),
('''dist_token''', '''deit.embeddings.distillation_token'''),
('''patch_embed.proj.weight''', '''deit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''deit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''deit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('''norm.weight''', '''deit.layernorm.weight'''),
('''norm.bias''', '''deit.layernorm.bias'''),
('''head.weight''', '''cls_classifier.weight'''),
('''head.bias''', '''cls_classifier.bias'''),
('''head_dist.weight''', '''distillation_classifier.weight'''),
('''head_dist.bias''', '''distillation_classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
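

# Standalone sketch of the qkv slicing above with a toy hidden size (values are
# illustrative, not model weights): the fused (3*h, h) matrix splits into equal
# query/key/value blocks.
def _demo_qkv_split(hidden_size=4):
    in_proj_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(
        3 * hidden_size, hidden_size
    )
    query = in_proj_weight[:hidden_size, :]
    key = in_proj_weight[hidden_size : hidden_size * 2, :]
    value = in_proj_weight[-hidden_size:, :]
    assert query.shape == key.shape == value.shape == (hidden_size, hidden_size)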
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def a (_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = DeiTConfig()
# all deit models have fine-tuned heads
SCREAMING_SNAKE_CASE_ = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
SCREAMING_SNAKE_CASE_ = 1_0_0_0
SCREAMING_SNAKE_CASE_ = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE_ = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE_ = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
SCREAMING_SNAKE_CASE_ = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ = idalabel
SCREAMING_SNAKE_CASE_ = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ = int(deit_name[-6:-4] )
SCREAMING_SNAKE_CASE_ = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith('''tiny''' ):
SCREAMING_SNAKE_CASE_ = 1_9_2
SCREAMING_SNAKE_CASE_ = 7_6_8
SCREAMING_SNAKE_CASE_ = 1_2
SCREAMING_SNAKE_CASE_ = 3
elif deit_name[9:].startswith('''small''' ):
SCREAMING_SNAKE_CASE_ = 3_8_4
SCREAMING_SNAKE_CASE_ = 1_5_3_6
SCREAMING_SNAKE_CASE_ = 1_2
SCREAMING_SNAKE_CASE_ = 6
if deit_name[9:].startswith('''base''' ):
pass
elif deit_name[4:].startswith('''large''' ):
SCREAMING_SNAKE_CASE_ = 1_0_2_4
SCREAMING_SNAKE_CASE_ = 4_0_9_6
SCREAMING_SNAKE_CASE_ = 2_4
SCREAMING_SNAKE_CASE_ = 1_6
# load original model from timm
SCREAMING_SNAKE_CASE_ = timm.create_model(_lowerCAmelCase , pretrained=_lowerCAmelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
SCREAMING_SNAKE_CASE_ = timm_model.state_dict()
SCREAMING_SNAKE_CASE_ = create_rename_keys(_lowerCAmelCase , _lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# load HuggingFace model
SCREAMING_SNAKE_CASE_ = DeiTForImageClassificationWithTeacher(_lowerCAmelCase ).eval()
model.load_state_dict(_lowerCAmelCase )
# Check outputs on an image, prepared by DeiTImageProcessor
SCREAMING_SNAKE_CASE_ = int(
(2_5_6 / 2_2_4) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
SCREAMING_SNAKE_CASE_ = DeiTImageProcessor(size=_lowerCAmelCase , crop_size=config.image_size )
SCREAMING_SNAKE_CASE_ = image_processor(images=prepare_img() , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE_ = encoding['''pixel_values''']
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = timm_model(_lowerCAmelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowerCAmelCase , outputs.logits , atol=1e-3 )
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(F"Saving model {deit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCAmelCase )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--deit_name""",
default="""vit_deit_base_distilled_patch16_224""",
type=str,
help="""Name of the DeiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
__SCREAMING_SNAKE_CASE =parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 89 |
def merge_sort(collection):
    """Sorts a list by repeatedly peeling off the current minimum and maximum."""
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
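

# Illustrative check (not in the original file): peeling min/max pairs yields a
# fully sorted list, including the odd leftover element in the middle.
def _demo_merge_sort():
    assert merge_sort([5, 2, 9, 1]) == [1, 2, 5, 9]
    assert merge_sort([4, 3, 7]) == [3, 4, 7]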
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =input("""Enter numbers separated by a comma:\n""").strip()
__SCREAMING_SNAKE_CASE =[int(item) for item in user_input.split(""",""")]
print(*merge_sort(unsorted), sep=""",""")
| 89 | 1 |
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __a ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : List[Any] )-> Tuple:
"""simple docstring"""
UpperCamelCase = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(UpperCAmelCase_ ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(UpperCAmelCase_ ) )
def _SCREAMING_SNAKE_CASE ( self : Dict )-> List[Any]:
"""simple docstring"""
UpperCamelCase = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(UpperCAmelCase_ ) )
def _SCREAMING_SNAKE_CASE ( self : Dict )-> Optional[int]:
"""simple docstring"""
UpperCamelCase = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
self.assertTrue(is_safetensors_compatible(UpperCAmelCase_ ) )
def _SCREAMING_SNAKE_CASE ( self : List[Any] )-> List[str]:
"""simple docstring"""
UpperCamelCase = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
# Removed: 'text_encoder/model.safetensors',
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertFalse(is_safetensors_compatible(UpperCAmelCase_ ) )
def _SCREAMING_SNAKE_CASE ( self : Tuple )-> int:
"""simple docstring"""
UpperCamelCase = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
UpperCamelCase = "fp16"
self.assertTrue(is_safetensors_compatible(UpperCAmelCase_ , variant=UpperCAmelCase_ ) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] )-> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = [
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
UpperCamelCase = "fp16"
self.assertTrue(is_safetensors_compatible(UpperCAmelCase_ , variant=UpperCAmelCase_ ) )
def _SCREAMING_SNAKE_CASE ( self : List[str] )-> str:
"""simple docstring"""
# pass variant but use the non-variant filenames
UpperCamelCase = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
UpperCamelCase = "fp16"
self.assertTrue(is_safetensors_compatible(UpperCAmelCase_ , variant=UpperCAmelCase_ ) )
def _SCREAMING_SNAKE_CASE ( self : Tuple )-> int:
"""simple docstring"""
UpperCamelCase = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
UpperCamelCase = "fp16"
self.assertFalse(is_safetensors_compatible(UpperCAmelCase_ , variant=UpperCAmelCase_ ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] )-> Optional[int]:
"""simple docstring"""
UpperCamelCase = [
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
]
UpperCamelCase = "fp16"
self.assertTrue(is_safetensors_compatible(UpperCAmelCase_ , variant=UpperCAmelCase_ ) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] )-> Union[str, Any]:
"""simple docstring"""
# pass variant but use the non-variant filenames
UpperCamelCase = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
UpperCamelCase = "fp16"
self.assertTrue(is_safetensors_compatible(UpperCAmelCase_ , variant=UpperCAmelCase_ ) )
def _SCREAMING_SNAKE_CASE ( self : List[str] )-> str:
"""simple docstring"""
UpperCamelCase = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
# 'text_encoder/model.fp16.safetensors',
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
UpperCamelCase = "fp16"
self.assertFalse(is_safetensors_compatible(UpperCAmelCase_ , variant=UpperCAmelCase_ ) )
| 554 |
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
def _SCREAMING_SNAKE_CASE ( self : Tuple , **UpperCAmelCase_ : int )-> List[str]:
"""simple docstring"""
UpperCamelCase = {}
UpperCamelCase = {}
UpperCamelCase = {}
# preprocess args
if "points_per_batch" in kwargs:
UpperCamelCase = kwargs["points_per_batch"]
if "points_per_crop" in kwargs:
UpperCamelCase = kwargs["points_per_crop"]
if "crops_n_layers" in kwargs:
UpperCamelCase = kwargs["crops_n_layers"]
if "crop_overlap_ratio" in kwargs:
UpperCamelCase = kwargs["crop_overlap_ratio"]
if "crop_n_points_downscale_factor" in kwargs:
UpperCamelCase = kwargs["crop_n_points_downscale_factor"]
# postprocess args
if "pred_iou_thresh" in kwargs:
UpperCamelCase = kwargs["pred_iou_thresh"]
if "stability_score_offset" in kwargs:
UpperCamelCase = kwargs["stability_score_offset"]
if "mask_threshold" in kwargs:
UpperCamelCase = kwargs["mask_threshold"]
if "stability_score_thresh" in kwargs:
UpperCamelCase = kwargs["stability_score_thresh"]
if "crops_nms_thresh" in kwargs:
UpperCamelCase = kwargs["crops_nms_thresh"]
if "output_rle_mask" in kwargs:
UpperCamelCase = kwargs["output_rle_mask"]
if "output_bboxes_mask" in kwargs:
UpperCamelCase = kwargs["output_bboxes_mask"]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self : Optional[int] , UpperCAmelCase_ : Optional[int] , *UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Tuple=None , **UpperCAmelCase_ : str )-> Any:
"""simple docstring"""
return super().__call__(UpperCAmelCase_ , *UpperCAmelCase_ , num_workers=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , **UpperCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : str=64 , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : float = 512 / 1_500 , UpperCAmelCase_ : Optional[int] = 32 , UpperCAmelCase_ : Optional[int] = 1 , )-> List[Any]:
"""simple docstring"""
UpperCamelCase = load_image(UpperCAmelCase_ )
UpperCamelCase = self.image_processor.size["longest_edge"]
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self.image_processor.generate_crop_boxes(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
UpperCamelCase = self.image_processor(images=UpperCAmelCase_ , return_tensors="pt" )
with self.device_placement():
if self.framework == "pt":
UpperCamelCase = self.get_inference_context()
with inference_context():
UpperCamelCase = self._ensure_tensor_on_device(UpperCAmelCase_ , device=self.device )
UpperCamelCase = self.model.get_image_embeddings(model_inputs.pop("pixel_values" ) )
UpperCamelCase = image_embeddings
UpperCamelCase = grid_points.shape[1]
UpperCamelCase = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
"To return all points at once, set points_per_batch to None" )
for i in range(0 , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCamelCase = grid_points[:, i : i + points_per_batch, :, :]
UpperCamelCase = input_labels[:, i : i + points_per_batch]
UpperCamelCase = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int=0.88 , UpperCAmelCase_ : Optional[int]=0.95 , UpperCAmelCase_ : Dict=0 , UpperCAmelCase_ : int=1 , )-> Any:
"""simple docstring"""
UpperCamelCase = model_inputs.pop("input_boxes" )
UpperCamelCase = model_inputs.pop("is_last" )
UpperCamelCase = model_inputs.pop("original_sizes" ).tolist()
UpperCamelCase = model_inputs.pop("reshaped_input_sizes" ).tolist()
UpperCamelCase = self.model(**UpperCAmelCase_ )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
UpperCamelCase = model_outputs["pred_masks"]
UpperCamelCase = self.image_processor.post_process_masks(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , binarize=UpperCAmelCase_ )
UpperCamelCase = model_outputs["iou_scores"]
UpperCamelCase , UpperCamelCase , UpperCamelCase = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def _SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : str=False , UpperCAmelCase_ : Optional[int]=0.7 , )-> Dict:
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = []
UpperCamelCase = []
for model_output in model_outputs:
all_scores.append(model_output.pop("iou_scores" ) )
all_masks.extend(model_output.pop("masks" ) )
all_boxes.append(model_output.pop("boxes" ) )
UpperCamelCase = torch.cat(UpperCAmelCase_ )
UpperCamelCase = torch.cat(UpperCAmelCase_ )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = self.image_processor.post_process_for_mask_generation(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
UpperCamelCase = defaultdict(UpperCAmelCase_ )
for output in model_outputs:
for k, v in output.items():
extra[k].append(UpperCAmelCase_ )
UpperCamelCase = {}
if output_rle_mask:
UpperCamelCase = rle_mask
if output_bboxes_mask:
UpperCamelCase = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 554 | 1 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS, R'''
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
''' ,)
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids):
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index
    def _ensure_exactly_one_mask_token(self, input_ids):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )
    def ensure_exactly_one_mask_token(self, model_inputs):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)
    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs
    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs
    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result
    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids
    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params
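
    # Hedged end-to-end usage sketch (the checkpoint name is an assumption; any
    # checkpoint whose tokenizer defines a mask token works):
    #
    #   from transformers import pipeline
    #   unmasker = pipeline("fill-mask", model="distilroberta-base")
    #   unmasker("Paris is the <mask> of France.", top_k=2)
    #   # each prediction carries `score`, `token`, `token_str` and `sequence`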
    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs
| 664 |
return outputs | 664 |
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
UpperCAmelCase_ = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
UpperCAmelCase_ = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
UpperCAmelCase_ = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class WikiSplit(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
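
    # Usage sketch (mirrors the examples in _KWARGS_DESCRIPTION above):
    #
    #   wiki_split = datasets.load_metric("wiki_split")
    #   results = wiki_split.compute(
    #       sources=["About 95 species are currently accepted ."],
    #       predictions=["About 95 you now get in ."],
    #       references=[["About 95 species are currently known ."]],
    #   )
    #   # -> {"sari": ..., "sacrebleu": ..., "exact": ...}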
    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
| 664 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
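# Minimal usage sketch (assuming the surrounding `transformers` package is
# installed): the default arguments mirror the facebook/vit-mae-base
# checkpoint, so a randomly initialized model needs no download.
#
#   from transformers import ViTMAEConfig, ViTMAEModel
#   configuration = ViTMAEConfig()
#   model = ViTMAEModel(configuration)
#   print(configuration.mask_ratio)  # 0.75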
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = (KDPMaDiscreteScheduler,)
UpperCAmelCase : Any = 10
def lowerCAmelCase_ ( self : Dict , **_UpperCAmelCase : Optional[Any] ):
_A = {
'num_train_timesteps': 1_100,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**_UpperCAmelCase )
return config
def lowerCAmelCase_ ( self : Any ):
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase )
def lowerCAmelCase_ ( self : Dict ):
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_UpperCAmelCase , beta_end=_UpperCAmelCase )
def lowerCAmelCase_ ( self : Tuple ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_UpperCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] ):
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config(prediction_type='v_prediction' )
_A = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
_A = self.dummy_model()
_A = self.dummy_sample_deter * scheduler.init_noise_sigma
_A = sample.to(_UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
_A = scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
_A = model(_UpperCAmelCase , _UpperCAmelCase )
_A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_A = output.prev_sample
_A = torch.sum(torch.abs(_UpperCAmelCase ) )
_A = torch.mean(torch.abs(_UpperCAmelCase ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_9_3_4E-0_7 ) < 1E-2
assert abs(result_mean.item() - 6.1_1_1_2E-1_0 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2E-0_7 ) < 1E-2
assert abs(result_mean.item() - 0.0002 ) < 1E-3
def lowerCAmelCase_ ( self : Optional[Any] ):
if torch_device == "mps":
return
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
_A = self.dummy_model()
_A = self.dummy_sample_deter * scheduler.init_noise_sigma
_A = sample.to(_UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
_A = scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
_A = model(_UpperCAmelCase , _UpperCAmelCase )
_A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_A = output.prev_sample
_A = torch.sum(torch.abs(_UpperCAmelCase ) )
_A = torch.mean(torch.abs(_UpperCAmelCase ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
def lowerCAmelCase_ ( self : Any ):
if torch_device == "mps":
return
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_UpperCAmelCase )
_A = self.dummy_model()
_A = self.dummy_sample_deter.to(_UpperCAmelCase ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_A = scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
_A = model(_UpperCAmelCase , _UpperCAmelCase )
_A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_A = output.prev_sample
_A = torch.sum(torch.abs(_UpperCAmelCase ) )
_A = torch.mean(torch.abs(_UpperCAmelCase ) )
if str(_UpperCAmelCase ).startswith('cpu' ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
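# Beyond the unit tests above, this scheduler is typically swapped into a
# diffusion pipeline like so (a sketch; the checkpoint id is illustrative):
#
#   from diffusers import DiffusionPipeline, KDPM2DiscreteScheduler
#   pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   pipe.scheduler = KDPM2DiscreteScheduler.from_config(pipe.scheduler.config)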
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
    )
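# Example invocation of the conversion script above (the script filename and
# all paths are placeholders):
#
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./funnel/model.ckpt \
#       --config_file ./funnel/config.json \
#       --pytorch_dump_path ./funnel/pytorch_model.bin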
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
    "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
    "allenai/longformer-large-4096-finetuned-triviaqa": (
        "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
    ),
    "allenai/longformer-base-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
    "allenai/longformer-large-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
}


class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export


class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
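# Usage sketch (assuming the surrounding `transformers` package): a config
# built with defaults matches the allenai/longformer-base-4096 architecture.
#
#   from transformers import LongformerConfig, LongformerModel
#   config = LongformerConfig(attention_window=512)
#   model = LongformerModel(config)  # randomly initialized, no download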
"""
Highest response ratio next (HRRN) scheduling is a non-preemptive discipline.
It was developed as a modification of shortest job next (SJN) to mitigate the
problem of process starvation.
https://en.wikipedia.org/wiki/Highest_response_ratio_next
"""

from statistics import mean

import numpy as np


def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # Find the first process that has not finished yet.
        i = 0
        while finished_process[i] == 1:
            i += 1

        # Delay the current time until the next process arrives, if needed.
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]

            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]

    return waiting_time


if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]

    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )

    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
            f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
        )

    print(f"average waiting time : {mean(waiting_time):.5f}")
    print(f"average turn around time : {mean(turn_around_time):.5f}")
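# Note on the selection rule above: HRRN picks the arrived, unfinished process
# with the highest response ratio
#     (waiting_time + burst_time) / burst_time
# which is the `temp` expression in the inner loop. For example, a process
# that has waited 6 time units with a burst of 3 has ratio (6 + 3) / 3 = 3.0,
# so long waits steadily raise a process's priority and prevent starvation.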
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
    "cross_validation.py",
    "gradient_accumulation.py",
    "local_sgd.py",
    "multi_process_metrics.py",
    "memory.py",
    "automatic_gradient_accumulation.py",
    "fsdp_with_peak_mem_tracking.py",
    "deepspeed_with_config_support.py",
    "megatron_lm_gpt_pretraining.py",
]


class ExampleDifferenceTests(unittest.TestCase):
    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, secondary_filename, parser_only
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)


@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clear_on_exit = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCamelCase ( _UpperCamelCase ):
"""simple docstring"""
snake_case_ = 'ClapFeatureExtractor'
snake_case_ = ('RobertaTokenizer', 'RobertaTokenizerFast')
def __init__( self : List[Any] , snake_case : List[Any] , snake_case : Any ) -> List[str]:
'''simple docstring'''
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __call__( self : List[str] , snake_case : Dict=None , snake_case : List[str]=None , snake_case : Optional[Any]=None , **snake_case : Optional[Any] ) -> str:
'''simple docstring'''
__magic_name__ : int = kwargs.pop('''sampling_rate''' , _UpperCAmelCase )
if text is None and audios is None:
raise ValueError('''You have to specify either text or audios. Both cannot be none.''' )
if text is not None:
__magic_name__ : Union[str, Any] = self.tokenizer(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if audios is not None:
__magic_name__ : Union[str, Any] = self.feature_extractor(
_UpperCAmelCase , sampling_rate=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if text is not None and audios is not None:
__magic_name__ : int = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_UpperCAmelCase ) , tensor_type=_UpperCAmelCase )
def _UpperCAmelCase ( self : List[Any] , *snake_case : Optional[Any] , **snake_case : List[Any] ) -> int:
'''simple docstring'''
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def _UpperCAmelCase ( self : int , *snake_case : List[str] , **snake_case : List[Any] ) -> int:
'''simple docstring'''
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def _UpperCAmelCase ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
__magic_name__ : int = self.tokenizer.model_input_names
__magic_name__ : Dict = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
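# Usage sketch (the checkpoint id and the 48 kHz sampling rate are assumptions
# about a typical public CLAP checkpoint, not taken from this file):
#
#   from transformers import ClapProcessor
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(text=["a sound of a cat"], audios=audio_array,
#                      sampling_rate=48_000, return_tensors="pt")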
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
A = TypeVar("""T""")
A = TypeVar("""U""")
class _UpperCamelCase ( Generic[T, U] ):
"""simple docstring"""
def __init__( self : Any , snake_case : T | None , snake_case : U | None ) -> Tuple:
'''simple docstring'''
__magic_name__ : Optional[Any] = key
__magic_name__ : str = val
__magic_name__ : DoubleLinkedListNode[T, U] | None = None
__magic_name__ : DoubleLinkedListNode[T, U] | None = None
def __repr__( self : str ) -> str:
'''simple docstring'''
return (
f"""Node: key: {self.key}, val: {self.val}, """
f"""has next: {bool(self.next )}, has prev: {bool(self.prev )}"""
)
class _UpperCamelCase ( Generic[T, U] ):
"""simple docstring"""
def __init__( self : str ) -> None:
'''simple docstring'''
__magic_name__ : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(snake_case , snake_case )
__magic_name__ : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(snake_case , snake_case )
__magic_name__ , __magic_name__ : Tuple = self.rear, self.head
def __repr__( self : str ) -> str:
'''simple docstring'''
__magic_name__ : List[str] = ['''DoubleLinkedList''']
__magic_name__ : Optional[Any] = self.head
while node.next is not None:
rep.append(str(snake_case ) )
__magic_name__ : Any = node.next
rep.append(str(self.rear ) )
return ",\n ".join(snake_case )
def _UpperCAmelCase ( self : List[str] , snake_case : DoubleLinkedListNode[T, U] ) -> None:
'''simple docstring'''
__magic_name__ : Tuple = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
__magic_name__ : Dict = node
__magic_name__ : Optional[int] = previous
__magic_name__ : Tuple = node
__magic_name__ : Optional[int] = self.rear
def _UpperCAmelCase ( self : str , snake_case : DoubleLinkedListNode[T, U] ) -> DoubleLinkedListNode[T, U] | None:
'''simple docstring'''
if node.prev is None or node.next is None:
return None
__magic_name__ : str = node.next
__magic_name__ : Dict = node.prev
__magic_name__ : Any = None
__magic_name__ : Dict = None
return node
class _UpperCamelCase ( Generic[T, U] ):
"""simple docstring"""
snake_case_ = {}
def __init__( self : Dict , snake_case : int ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ : DoubleLinkedList[T, U] = DoubleLinkedList()
__magic_name__ : str = capacity
__magic_name__ : Tuple = 0
__magic_name__ : Optional[Any] = 0
__magic_name__ : List[str] = 0
__magic_name__ : dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self : Tuple ) -> str:
'''simple docstring'''
return (
f"""CacheInfo(hits={self.hits}, misses={self.miss}, """
f"""capacity={self.capacity}, current size={self.num_keys})"""
)
def __contains__( self : List[str] , snake_case : T ) -> bool:
'''simple docstring'''
return key in self.cache
def _UpperCAmelCase ( self : Optional[int] , snake_case : T ) -> U | None:
'''simple docstring'''
if key in self.cache:
self.hits += 1
__magic_name__ : DoubleLinkedListNode[T, U] = self.cache[key]
__magic_name__ : Optional[Any] = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(snake_case )
return node.val
self.miss += 1
return None
def _UpperCAmelCase ( self : str , snake_case : T , snake_case : U ) -> None:
'''simple docstring'''
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
__magic_name__ : Optional[Any] = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(snake_case ) is not None
) # node guaranteed to be in list assert node.key is not None
del self.cache[first_node.key]
self.num_keys -= 1
__magic_name__ : List[str] = DoubleLinkedListNode(snake_case , snake_case )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
__magic_name__ : Dict = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
__magic_name__ : Any = value
self.list.add(snake_case )
@classmethod
def _UpperCAmelCase ( cls : Tuple , snake_case : int = 128 ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
'''simple docstring'''
def cache_decorator_inner(snake_case : Callable[[T], U] ) -> Callable[..., U]:
def cache_decorator_wrapper(*snake_case : T ) -> U:
if func not in cls.decorator_function_to_instance_map:
__magic_name__ : Any = LRUCache(snake_case )
__magic_name__ : Optional[Any] = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
__magic_name__ : Any = func(*snake_case )
cls.decorator_function_to_instance_map[func].put(args[0] , snake_case )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(snake_case , '''cache_info''' , snake_case ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
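# Usage sketch for the decorator defined above: memoize a naive recursive
# Fibonacci with a 100-entry cache, then inspect hit/miss statistics.

@LRUCache.decorator(100)
def fib(num: int) -> int:
    if num in (1, 2):
        return 1
    return fib(num - 1) + fib(num - 2)


# fib(20)           -> 6765
# fib.cache_info()  -> CacheInfo(hits=..., misses=..., capacity=100, current size=...)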
def equated_monthly_installments(
    principal: float, rate_per_annum: float, years_to_repay: int
) -> float:
    """
    Formula for amortization amount per month:
    A = p * r * (1 + r)^n / ((1 + r)^n - 1)
    where p is the principal, r is the rate of interest per month
    and n is the number of monthly payments.
    """
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
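# Worked example for the formula above: borrowing 25,000 at 12% per annum over
# 3 years gives a monthly rate r = 0.01 and n = 36 payments, so the EMI is
#   25000 * 0.01 * 1.01**36 / (1.01**36 - 1) ~ 830.36
# i.e. equated_monthly_installments(25000, 0.12, 3) -> 830.3577453212793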
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
    "distilbert-base-uncased-distilled-squad": (
        "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
    "distilbert-base-cased-distilled-squad": (
        "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
    "distilbert-base-multilingual-cased": (
        "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
    ),
    "distilbert-base-uncased-finetuned-sst-2-english": (
        "https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
    ),
}


class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes up to and including num."""
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
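# For example, prime_sieve_eratosthenes(30) returns
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]; the sieve runs in O(n log log n) time.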
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True


def _get_default_logging_level():
    """
    If the TRANSFORMERS_VERBOSITY env var is set to one of the valid choices,
    return that as the default level; otherwise fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if not _default_handler:
            return

        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None


def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name (the library root by default)."""
    if name is None:
        name = _get_library_name()

    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    """Set the verbosity to the `INFO` level."""
    return set_verbosity(INFO)


def set_verbosity_warning():
    """Set the verbosity to the `WARNING` level."""
    return set_verbosity(WARNING)


def set_verbosity_debug():
    """Set the verbosity to the `DEBUG` level."""
    return set_verbosity(DEBUG)


def set_verbosity_error():
    """Set the verbosity to the `ERROR` level."""
    return set_verbosity(ERROR)


def disable_default_handler() -> None:
    """Disable the default handler of the library's root logger."""
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    """Enable the default handler of the library's root logger."""
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    """Add a handler to the library's root logger."""
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    """Remove the given handler from the library's root logger."""
    _configure_library_root_logger()

    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    """Enable explicit formatting for every handler of the library's root logger."""
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    """Reset the formatting for every handler of the library's root logger."""
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)


def warning_advice(self, *args, **kwargs):
    """
    Identical to `logger.warning()`, but skipped if the env var
    TRANSFORMERS_NO_ADVISORY_WARNINGS=1 is set.
    """
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """Identical to `logger.warning()`, but emits each distinct message only once."""
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once


class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bars() -> None:
    """Enable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bars() -> None:
    """Disable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
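# Typical downstream usage of this module (a sketch; it exercises only the
# public API defined above):
#
#   from transformers.utils import logging
#   logging.set_verbosity_info()
#   logger = logging.get_logger("transformers")
#   logger.info("INFO")
#   logger.warning_advice("WARN")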
def apply_table(inp, table):
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    return data[1:] + data[0]


def xor(a, b):
    res = ""
    for i in range(len(b)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, sa, sb, key, message):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(sa, temp[:4])  # noqa: E741
    r = apply_sbox(sb, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption (same Feistel rounds with the subkeys in reverse order)
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decypting is:", PT)
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def is_automorphic_number(number: int) -> bool:
    """
    An automorphic number is one whose square ends in the number itself,
    e.g. 5 * 5 = 25 and 76 * 76 = 5776.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
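# For example, 5**2 = 25 and 76**2 = 5776 both end in the original number, so
# is_automorphic_number(5) and is_automorphic_number(76) return True, while
# is_automorphic_number(7) returns False (49 does not end in 7).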
"""simple docstring"""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`."
            )
def lowercase__ ( ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_audio_classification' , lowerCAmelCase , lowerCAmelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCAmelCase = training_args.get_process_log_level()
logger.setLevel(lowerCAmelCase )
transformers.utils.logging.set_verbosity(lowerCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to train from scratch.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset and prepare it for the audio classification task.
UpperCAmelCase = DatasetDict()
UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
'Make sure to set `--audio_column_name` to the correct audio column - one of '
F"{', '.join(raw_datasets['train'].column_names )}." )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
'Make sure to set `--label_column_name` to the correct text column - one of '
F"{', '.join(raw_datasets['train'].column_names )}." )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
UpperCAmelCase = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
UpperCAmelCase = feature_extractor.model_input_names[0]
def train_transforms(lowerCAmelCase : List[Any] ):
UpperCAmelCase = []
for audio in batch[data_args.audio_column_name]:
UpperCAmelCase = random_subsample(
audio['array'] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(lowerCAmelCase )
UpperCAmelCase = feature_extractor(lowerCAmelCase , sampling_rate=feature_extractor.sampling_rate )
UpperCAmelCase = {model_input_name: inputs.get(lowerCAmelCase )}
UpperCAmelCase = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(lowerCAmelCase : int ):
UpperCAmelCase = [audio['array'] for audio in batch[data_args.audio_column_name]]
UpperCAmelCase = feature_extractor(lowerCAmelCase , sampling_rate=feature_extractor.sampling_rate )
UpperCAmelCase = {model_input_name: inputs.get(lowerCAmelCase )}
UpperCAmelCase = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
UpperCAmelCase = raw_datasets['train'].features[data_args.label_column_name].names
UpperCAmelCase , UpperCAmelCase = {}, {}
for i, label in enumerate(lowerCAmelCase ):
UpperCAmelCase = str(lowerCAmelCase )
UpperCAmelCase = label
# Load the accuracy metric from the datasets package
UpperCAmelCase = evaluate.load('accuracy' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(lowerCAmelCase : int ):
UpperCAmelCase = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=lowerCAmelCase , references=eval_pred.label_ids )
UpperCAmelCase = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(lowerCAmelCase ) , labelaid=lowerCAmelCase , idalabel=lowerCAmelCase , finetuning_task='audio-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
UpperCAmelCase = (
raw_datasets['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(lowerCAmelCase , output_all_columns=lowerCAmelCase )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
UpperCAmelCase = (
raw_datasets['eval'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(lowerCAmelCase , output_all_columns=lowerCAmelCase )
# Initialize our trainer
UpperCAmelCase = Trainer(
model=lowerCAmelCase , args=lowerCAmelCase , train_dataset=raw_datasets['train'] if training_args.do_train else None , eval_dataset=raw_datasets['eval'] if training_args.do_eval else None , compute_metrics=lowerCAmelCase , tokenizer=lowerCAmelCase , )
# Training
if training_args.do_train:
UpperCAmelCase = None
if training_args.resume_from_checkpoint is not None:
UpperCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCAmelCase = last_checkpoint
UpperCAmelCase = trainer.train(resume_from_checkpoint=lowerCAmelCase )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCAmelCase = trainer.evaluate()
trainer.log_metrics('eval' , lowerCAmelCase )
trainer.save_metrics('eval' , lowerCAmelCase )
# Write model card and (optionally) push to hub
UpperCAmelCase = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'audio-classification',
'dataset': data_args.dataset_name,
'tags': ['audio-classification'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCAmelCase )
else:
trainer.create_model_card(**lowerCAmelCase )
if __name__ == "__main__":
main()
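# --- A minimal sketch of the `random_subsample` helper used in `train_transforms`
# above. This is an assumed implementation: the name, signature, and defaults are
# inferred from the call site, not taken from this file.
import numpy as np
from random import randint

def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16_000) -> np.ndarray:
    """Randomly crop `wav` to at most `max_length` seconds of audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]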
| 373 |
"""simple docstring"""
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
SCREAMING_SNAKE_CASE_ = '''scheduler_config.json'''
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ ):
__SCREAMING_SNAKE_CASE : List[Any] = 1
__SCREAMING_SNAKE_CASE : Dict = 2
__SCREAMING_SNAKE_CASE : List[Any] = 3
__SCREAMING_SNAKE_CASE : Any = 4
__SCREAMING_SNAKE_CASE : Any = 5
__SCREAMING_SNAKE_CASE : Union[str, Any] = 6
__SCREAMING_SNAKE_CASE : str = 7
__SCREAMING_SNAKE_CASE : Any = 8
__SCREAMING_SNAKE_CASE : Tuple = 9
__SCREAMING_SNAKE_CASE : int = 1_0
__SCREAMING_SNAKE_CASE : Union[str, Any] = 1_1
__SCREAMING_SNAKE_CASE : Union[str, Any] = 1_2
__SCREAMING_SNAKE_CASE : Dict = 1_3
__SCREAMING_SNAKE_CASE : Optional[Any] = 1_4
@dataclass
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ ):
__SCREAMING_SNAKE_CASE : torch.FloatTensor
class _UpperCAmelCase :
__SCREAMING_SNAKE_CASE : str = SCHEDULER_CONFIG_NAME
__SCREAMING_SNAKE_CASE : Dict = []
__SCREAMING_SNAKE_CASE : Union[str, Any] = True
@classmethod
def a_ ( cls , lowercase_ = None , lowercase_ = None , lowercase_=False , **lowercase_ , ) -> List[Any]:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = cls.load_config(
pretrained_model_name_or_path=lowercase_ , subfolder=lowercase_ , return_unused_kwargs=lowercase_ , return_commit_hash=lowercase_ , **lowercase_ , )
return cls.from_config(lowercase_ , return_unused_kwargs=lowercase_ , **lowercase_ )
def a_ ( self , lowercase_ , lowercase_ = False , **lowercase_ ) -> str:
self.save_config(save_directory=lowercase_ , push_to_hub=lowercase_ , **lowercase_ )
@property
def a_ ( self ) -> Union[str, Any]:
return self._get_compatibles()
@classmethod
def a_ ( cls ) -> Any:
UpperCAmelCase = list(set([cls.__name__] + cls._compatibles ) )
UpperCAmelCase = importlib.import_module(__name__.split('.' )[0] )
UpperCAmelCase = [
getattr(lowercase_ , lowercase_ ) for c in compatible_classes_str if hasattr(lowercase_ , lowercase_ )
]
return compatible_classes
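# --- Usage sketch for the mixin above (hedged: `DDIMScheduler` and the model id
# are illustrative; any scheduler inheriting this mixin behaves the same way):
#
#     from diffusers import DDIMScheduler
#
#     scheduler = DDIMScheduler.from_pretrained(
#         "runwayml/stable-diffusion-v1-5", subfolder="scheduler"
#     )
#     print(scheduler.compatibles)   # classes listed in `_compatibles`, resolved
#                                    # from the package's top-level module
#     scheduler.save_pretrained("./my_scheduler")  # writes scheduler_config.json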
| 373 | 1 |
'''simple docstring'''
def _A ( A__ = 50 ):
"""simple docstring"""
__lowercase = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f'{solution() = }')
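# --- An equivalent closed recurrence for the same count (a sketch). For a single
# coloured tile length m, t(n) = t(n - 1) + t(n - m) counts tilings of a length-n
# row with black unit squares and m-unit tiles; subtracting 1 removes the all-black
# tiling, and the three tile lengths (m = 2, 3, 4) are summed, as above.
def solution_recurrence(length: int = 50) -> int:
    total = 0
    for m in (2, 3, 4):
        t = [1] * (length + 1)  # t[n] = 1 for n < m: only the all-black row fits
        for n in range(m, length + 1):
            t[n] = t[n - 1] + t[n - m]
        total += t[length] - 1  # exclude the all-black tiling
    return total
# e.g. solution_recurrence(5) == 12 (7 red + 3 green + 2 blue), matching the
# dynamic-programming solution above.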
| 704 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class lowercase_ :
"""simple docstring"""
def __init__( self : Union[str, Any] ,lowercase__ : Union[str, Any] ,lowercase__ : List[Any]=1_3 ,lowercase__ : Union[str, Any]=7 ,lowercase__ : List[Any]=True ,lowercase__ : str=True ,lowercase__ : Any=True ,lowercase__ : Union[str, Any]=True ,lowercase__ : Tuple=9_9 ,lowercase__ : List[str]=[1, 1, 2] ,lowercase__ : List[Any]=1 ,lowercase__ : List[Any]=3_2 ,lowercase__ : int=4 ,lowercase__ : Tuple=8 ,lowercase__ : Tuple=3_7 ,lowercase__ : str="gelu_new" ,lowercase__ : Dict=0.1 ,lowercase__ : Optional[int]=0.1 ,lowercase__ : Optional[Any]=0.0 ,lowercase__ : Tuple=5_1_2 ,lowercase__ : Any=3 ,lowercase__ : List[str]=0.0_2 ,lowercase__ : List[Any]=3 ,lowercase__ : List[Any]=4 ,lowercase__ : Optional[Any]=None ,lowercase__ : Tuple=False ,):
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = block_sizes
__lowercase = num_decoder_layers
__lowercase = d_model
__lowercase = n_head
__lowercase = d_head
__lowercase = d_inner
__lowercase = hidden_act
__lowercase = hidden_dropout
__lowercase = attention_dropout
__lowercase = activation_dropout
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = 2
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
__lowercase = initializer_std
# Used in the tests to check the size of the first attention layer
__lowercase = n_head
# Used in the tests to check the size of the first hidden state
__lowercase = self.d_model
# Used in the tests to check the number of output hidden states/attentions
__lowercase = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
__lowercase = self.num_hidden_layers + 2
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
__lowercase = ids_tensor([self.batch_size] ,self.num_choices )
__lowercase = FunnelConfig(
vocab_size=self.vocab_size ,block_sizes=self.block_sizes ,num_decoder_layers=self.num_decoder_layers ,d_model=self.d_model ,n_head=self.n_head ,d_head=self.d_head ,d_inner=self.d_inner ,hidden_act=self.hidden_act ,hidden_dropout=self.hidden_dropout ,attention_dropout=self.attention_dropout ,activation_dropout=self.activation_dropout ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_std=self.initializer_std ,)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : List[str] ,lowercase__ : Union[str, Any] ,lowercase__ : int ,lowercase__ : List[str] ,lowercase__ : Optional[int] ,lowercase__ : List[Any] ,lowercase__ : List[str] ,):
__lowercase = TFFunnelModel(config=lowercase__ )
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase = model(lowercase__ )
__lowercase = [input_ids, input_mask]
__lowercase = model(lowercase__ )
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.d_model) )
__lowercase = False
__lowercase = TFFunnelModel(config=lowercase__ )
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.d_model) )
__lowercase = False
__lowercase = TFFunnelModel(config=lowercase__ )
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.d_model) )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : List[Any] ,lowercase__ : Tuple ,lowercase__ : Dict ,lowercase__ : Any ,lowercase__ : Optional[Any] ,lowercase__ : Union[str, Any] ,lowercase__ : Dict ,):
__lowercase = TFFunnelBaseModel(config=lowercase__ )
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase = model(lowercase__ )
__lowercase = [input_ids, input_mask]
__lowercase = model(lowercase__ )
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, 2, self.d_model) )
__lowercase = False
__lowercase = TFFunnelBaseModel(config=lowercase__ )
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, 3, self.d_model) )
__lowercase = False
__lowercase = TFFunnelBaseModel(config=lowercase__ )
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, 2, self.d_model) )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Any ,lowercase__ : Any ,lowercase__ : List[Any] ,lowercase__ : Optional[int] ,lowercase__ : Dict ,lowercase__ : int ,lowercase__ : List[str] ,):
__lowercase = TFFunnelForPreTraining(config=lowercase__ )
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[Any] ,lowercase__ : Dict ,lowercase__ : List[Any] ,lowercase__ : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : Tuple ,):
__lowercase = TFFunnelForMaskedLM(config=lowercase__ )
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : str ,lowercase__ : Union[str, Any] ,lowercase__ : Tuple ,):
__lowercase = self.num_labels
__lowercase = TFFunnelForSequenceClassification(config=lowercase__ )
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[str] ,lowercase__ : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : List[str] ,lowercase__ : List[str] ,lowercase__ : str ,lowercase__ : Tuple ,):
__lowercase = self.num_choices
__lowercase = TFFunnelForMultipleChoice(config=lowercase__ )
__lowercase = tf.tile(tf.expand_dims(lowercase__ ,1 ) ,(1, self.num_choices, 1) )
__lowercase = tf.tile(tf.expand_dims(lowercase__ ,1 ) ,(1, self.num_choices, 1) )
__lowercase = tf.tile(tf.expand_dims(lowercase__ ,1 ) ,(1, self.num_choices, 1) )
__lowercase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : List[str] ,lowercase__ : List[Any] ,lowercase__ : Dict ,lowercase__ : Any ,lowercase__ : Union[str, Any] ,):
__lowercase = self.num_labels
__lowercase = TFFunnelForTokenClassification(config=lowercase__ )
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Optional[Any] ,lowercase__ : Optional[int] ,lowercase__ : Dict ,lowercase__ : List[str] ,lowercase__ : str ,lowercase__ : Tuple ,lowercase__ : Any ,):
__lowercase = TFFunnelForQuestionAnswering(config=lowercase__ )
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = self.prepare_config_and_inputs()
(
    __lowercase,
    __lowercase,
    __lowercase,
    __lowercase,
    __lowercase,
    __lowercase,
    __lowercase,
) = config_and_inputs
__lowercase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowercase_ (lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE : Union[str, Any] = (
{
'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
'fill-mask': TFFunnelForMaskedLM,
'question-answering': TFFunnelForQuestionAnswering,
'text-classification': TFFunnelForSequenceClassification,
'token-classification': TFFunnelForTokenClassification,
'zero-shot': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE : Tuple = False
SCREAMING_SNAKE_CASE : Any = False
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = TFFunnelModelTester(self )
__lowercase = ConfigTester(self ,config_class=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase__ )
@require_tf
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
SCREAMING_SNAKE_CASE : Dict = False
SCREAMING_SNAKE_CASE : List[str] = False
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = TFFunnelModelTester(self ,base=lowercase__ )
__lowercase = ConfigTester(self ,config_class=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : List[str] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowercase__ )
| 624 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _UpperCAmelCase ( A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = KandinskyVaaControlnetImgaImgPipeline
lowercase__ = ["""image_embeds""", """negative_image_embeds""", """image""", """hint"""]
lowercase__ = ["""image_embeds""", """negative_image_embeds""", """image""", """hint"""]
lowercase__ = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
lowercase__ = False
@property
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
return 32
@property
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
return 32
@property
def lowercase__ ( self : Dict ):
'''simple docstring'''
return self.time_input_dim
@property
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowercase__ ( self : Tuple ):
'''simple docstring'''
return 100
@property
def lowercase__ ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = {
"""in_channels""": 8,
# Out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
lowercase__ = UNetaDConditionModel(**lowerCamelCase )
return model
@property
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = VQModel(**self.dummy_movq_kwargs )
return model
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = self.dummy_unet
lowercase__ = self.dummy_movq
lowercase__ = {
"""num_train_timesteps""": 1_000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
lowercase__ = DDIMScheduler(**lowerCamelCase )
lowercase__ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def lowercase__ ( self : Union[str, Any], lowerCamelCase : Union[str, Any], lowerCamelCase : int=0 ):
'''simple docstring'''
lowercase__ = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
lowercase__ = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
lowerCamelCase )
# create init_image
lowercase__ = floats_tensor((1, 3, 64, 64), rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
lowercase__ = image.cpu().permute(0, 2, 3, 1 )[0]
lowercase__ = Image.fromarray(np.uinta(lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) )
# create hint
lowercase__ = floats_tensor((1, 3, 64, 64), rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
if str(lowerCamelCase ).startswith('''mps''' ):
lowercase__ = torch.manual_seed(lowerCamelCase )
else:
lowercase__ = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
lowercase__ = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = """cpu"""
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**lowerCamelCase )
lowercase__ = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = pipe(**self.get_dummy_inputs(lowerCamelCase ) )
lowercase__ = output.images
lowercase__ = pipe(
**self.get_dummy_inputs(lowerCamelCase ), return_dict=lowerCamelCase, )[0]
lowercase__ = image[0, -3:, -3:, -1]
lowercase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__ = np.array(
[0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' )
lowercase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
lowercase__ = init_image.resize((512, 512) )
lowercase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
lowercase__ = torch.from_numpy(np.array(lowerCamelCase ) ).float() / 255.0
lowercase__ = hint.permute(2, 0, 1 ).unsqueeze(0 )
lowercase__ = """A robot, 4k photo"""
lowercase__ = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''', torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase )
lowercase__ = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''', torch_dtype=torch.floataa )
lowercase__ = pipeline.to(lowerCamelCase )
pipeline.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowercase__ = pipe_prior(
lowerCamelCase, image=lowerCamelCase, strength=0.85, generator=lowerCamelCase, negative_prompt='''''', ).to_tuple()
lowercase__ = pipeline(
image=lowerCamelCase, image_embeds=lowerCamelCase, negative_image_embeds=lowerCamelCase, hint=lowerCamelCase, generator=lowerCamelCase, num_inference_steps=100, height=512, width=512, strength=0.5, output_type='''np''', )
lowercase__ = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase )
| 183 |
"""simple docstring"""
from math import pow
def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int , ) -> tuple[int, int]:
'''simple docstring'''
if current_sum == needed_sum:
# If the sum of the powers is equal to needed_sum, then we have a solution.
solutions_count += 1
return current_sum, solutions_count
__UpperCAmelCase : List[str] = int(pow(_UpperCamelCase , _UpperCamelCase ) )
if current_sum + i_to_n <= needed_sum:
# If the sum of the powers is less than needed_sum, then continue adding powers.
current_sum += i_to_n
__UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = backtrack(
_UpperCamelCase , _UpperCamelCase , current_number + 1 , _UpperCamelCase , _UpperCamelCase )
current_sum -= i_to_n
if i_to_n < needed_sum:
# If the power of i is less than needed_sum, then try with the next power.
__UpperCAmelCase ,__UpperCAmelCase : List[Any] = backtrack(
_UpperCamelCase , _UpperCamelCase , current_number + 1 , _UpperCamelCase , _UpperCamelCase )
return current_sum, solutions_count
def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : int ) -> int:
'''simple docstring'''
if not (1 <= needed_sum <= 1_0_0_0 and 2 <= power <= 1_0):
raise ValueError(
"""Invalid input\n"""
"""needed_sum must be between 1 and 1000, power between 2 and 10.""" )
return backtrack(_UpperCamelCase , _UpperCamelCase , 1 , 0 , 0 )[1] # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
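# --- An equivalent bottom-up formulation (a sketch): a 0/1-knapsack count over
# the distinct powers 1**power, 2**power, ..., iterating sums in reverse so that
# each base is used at most once.
def solve_dp(needed_sum: int, power: int) -> int:
    ways = [0] * (needed_sum + 1)
    ways[0] = 1
    base = 1
    while base**power <= needed_sum:
        p = base**power
        for s in range(needed_sum, p - 1, -1):  # reverse keeps bases distinct
            ways[s] += ways[s - p]
        base += 1
    return ways[needed_sum]
# e.g. solve_dp(100, 2) == 3: 10**2, 6**2 + 8**2, and 1 + 9 + 16 + 25 + 49.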
| 139 | 0 |
'''simple docstring'''
import cva
import numpy as np
class lowerCamelCase_ :
def __init__( self : Optional[int] , lowerCAmelCase__ : float , lowerCAmelCase__ : int ):
"""simple docstring"""
if k in (0.04, 0.06):
SCREAMING_SNAKE_CASE : str = k
SCREAMING_SNAKE_CASE : Optional[Any] = window_size
else:
raise ValueError('''invalid k value''' )
def __str__( self : List[Any] ):
"""simple docstring"""
return str(self.k )
def __lowercase ( self : Tuple , lowerCAmelCase__ : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = cva.imread(lowerCAmelCase__ , 0 )
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = img.shape
SCREAMING_SNAKE_CASE : list[list[int]] = []
SCREAMING_SNAKE_CASE : Dict = img.copy()
SCREAMING_SNAKE_CASE : List[str] = cva.cvtColor(lowerCAmelCase__ , cva.COLOR_GRAY2RGB )
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : int = np.gradient(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : str = dx**2
SCREAMING_SNAKE_CASE : Any = dy**2
SCREAMING_SNAKE_CASE : Optional[Any] = dx * dy
SCREAMING_SNAKE_CASE : str = self.k  # use the k validated in __init__ instead of a hard-coded 0.04
SCREAMING_SNAKE_CASE : str = self.window_size // 2
for y in range(lowerCAmelCase__ , h - offset ):
for x in range(lowerCAmelCase__ , w - offset ):
SCREAMING_SNAKE_CASE : Any = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
SCREAMING_SNAKE_CASE : List[Any] = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
SCREAMING_SNAKE_CASE : Optional[int] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
SCREAMING_SNAKE_CASE : Dict = (wxx * wyy) - (wxy**2)
SCREAMING_SNAKE_CASE : Dict = wxx + wyy
SCREAMING_SNAKE_CASE : Tuple = det - k * (trace**2)
# Threshold on the corner response r; tune this value per image
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 2_55 )
return color_img, corner_list
if __name__ == "__main__":
lowerCAmelCase_ : Tuple = HarrisCorner(0.04, 3)
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = edge_detect.detect('path_to_image')
cva.imwrite('detect.png', color_img)
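# --- A vectorized version of the response computation above (a sketch; OpenCV
# box filters replace the explicit window loops, same R = det(M) - k * trace(M)^2):
import cv2
import numpy as np

def harris_response(gray: np.ndarray, k: float = 0.04, window_size: int = 3) -> np.ndarray:
    dy, dx = np.gradient(gray.astype(np.float64))
    ksize = (window_size, window_size)
    wxx = cv2.boxFilter(dx * dx, -1, ksize, normalize=False)
    wyy = cv2.boxFilter(dy * dy, -1, ksize, normalize=False)
    wxy = cv2.boxFilter(dx * dy, -1, ksize, normalize=False)
    return (wxx * wyy - wxy**2) - k * (wxx + wyy) ** 2
# Corners are then the pixels whose response exceeds a threshold (0.5 above).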
| 464 |
'''simple docstring'''
import string
from math import logaa
def UpperCAmelCase ( A : str , A : str ):
SCREAMING_SNAKE_CASE : Optional[Any] = document.translate(
str.maketrans('''''' , '''''' , string.punctuation ) ).replace('''\n''' , '''''' )
SCREAMING_SNAKE_CASE : Tuple = document_without_punctuation.split(''' ''' ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def UpperCAmelCase ( A : str , A : str ):
SCREAMING_SNAKE_CASE : int = corpus.lower().translate(
str.maketrans('''''' , '''''' , string.punctuation ) ) # strip all punctuation and replace it with ''
SCREAMING_SNAKE_CASE : Any = corpus_without_punctuation.split('''\n''' )
SCREAMING_SNAKE_CASE : Optional[Any] = term.lower()
return (len([doc for doc in docs if term in doc] ), len(A ))
def UpperCAmelCase ( A : int , A : int , A : str=False ):
if smoothing:
if n == 0:
raise ValueError('''log10(0) is undefined.''' )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError('''df must be > 0''' )
elif n == 0:
raise ValueError('''log10(0) is undefined.''' )
return round(logaa(n / df ) , 3 )
def UpperCAmelCase ( A : int , A : int ):
return round(tf * idf , 3 )
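# --- A worked end-to-end example of the three functions above (a sketch with
# clean names, since the definitions above are name-mangled):
from math import log10

def tf_idf_demo() -> float:
    corpus = "the cat sat\na dog ran\nthe cat and the cat nap"
    term, document = "cat", "the cat and the cat nap"
    tf = document.split(" ").count(term)                # term frequency = 2
    docs = corpus.split("\n")
    df = len([doc for doc in docs if term in doc])      # document frequency = 2
    idf = round(log10(len(docs) / df), 3)               # log10(3 / 2) = 0.176
    return round(tf * idf, 3)                           # tf-idf = 0.352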
| 464 | 1 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = ArgumentParser('Transformers CLI tool' , usage='transformers-cli <command> [<args>]' )
SCREAMING_SNAKE_CASE__ : Tuple = parser.add_subparsers(help='transformers-cli command helpers' )
# Register commands
ConvertCommand.register_subcommand(lowercase__ )
DownloadCommand.register_subcommand(lowercase__ )
EnvironmentCommand.register_subcommand(lowercase__ )
RunCommand.register_subcommand(lowercase__ )
ServeCommand.register_subcommand(lowercase__ )
UserCommands.register_subcommand(lowercase__ )
AddNewModelCommand.register_subcommand(lowercase__ )
AddNewModelLikeCommand.register_subcommand(lowercase__ )
LfsCommands.register_subcommand(lowercase__ )
PTtoTFCommand.register_subcommand(lowercase__ )
# Let's go
SCREAMING_SNAKE_CASE__ : str = parser.parse_args()
if not hasattr(lowercase__ , 'func' ):
parser.print_help()
exit(1 )
# Run
SCREAMING_SNAKE_CASE__ : str = args.func(lowercase__ )
service.run()
if __name__ == "__main__":
main()
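# --- A minimal sketch of the register_subcommand / set_defaults(func=...) / run
# dispatch pattern that main() relies on above. The command below is hypothetical;
# real commands subclass the CLI's base command class.
import argparse

class HelloCommand:
    @staticmethod
    def register_subcommand(subparsers: "argparse._SubParsersAction") -> None:
        parser = subparsers.add_parser("hello", help="print a greeting")
        parser.add_argument("--name", default="world")
        parser.set_defaults(func=lambda args: HelloCommand(args.name))

    def __init__(self, name: str) -> None:
        self.name = name

    def run(self) -> None:
        print(f"Hello, {self.name}!")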
| 85 |
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.getLogger(__name__)
SCREAMING_SNAKE_CASE__ : List[Any] = "Hello world! cécé herlolip"
SCREAMING_SNAKE_CASE__ : Dict = namedtuple(
"BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def _a ( lowercase__ : List[str] , lowercase__ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = BertAbsConfig(
temp_dir='.' , finetune_bert=lowercase__ , large=lowercase__ , share_emb=lowercase__ , use_bert_emb=lowercase__ , encoder='bert' , max_pos=5_12 , enc_layers=6 , enc_hidden_size=5_12 , enc_heads=8 , enc_ff_size=5_12 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_68 , dec_heads=8 , dec_ff_size=20_48 , dec_dropout=0.2 , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.load(lowercase__ , lambda lowercase__ , lowercase__ : storage )
SCREAMING_SNAKE_CASE__ : Any = AbsSummarizer(lowercase__ , torch.device('cpu' ) , lowercase__ )
original.eval()
SCREAMING_SNAKE_CASE__ : List[Any] = BertAbsSummarizer(lowercase__ , torch.device('cpu' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('convert the model' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outputs are identical
# ----------------------------------
logging.info('Make sure that the models\' outputs are identical' )
SCREAMING_SNAKE_CASE__ : Any = BertTokenizer.from_pretrained('bert-base-uncased' )
# prepare the model inputs
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.encode('This is sample éàalj\'-.' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(lowercase__ )) )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor(lowercase__ ).unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.encode('This is sample 3 éàalj\'-.' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(lowercase__ )) )
SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor(lowercase__ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
SCREAMING_SNAKE_CASE__ : int = encoder_input_ids
SCREAMING_SNAKE_CASE__ : Any = decoder_input_ids
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
SCREAMING_SNAKE_CASE__ : Dict = None
SCREAMING_SNAKE_CASE__ : str = None
SCREAMING_SNAKE_CASE__ : List[str] = None
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
# The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
SCREAMING_SNAKE_CASE__ : Optional[Any] = original(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = original.generator(lowercase__ )
SCREAMING_SNAKE_CASE__ : Tuple = new_model(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )[0]
SCREAMING_SNAKE_CASE__ : List[Any] = new_model.generator(lowercase__ )
SCREAMING_SNAKE_CASE__ : Tuple = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print('Maximum absolute difference between outputs: {:.2f}'.format(lowercase__ ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print('Maximum absolute difference between outputs: {:.2f}'.format(lowercase__ ) )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.allclose(lowercase__ , lowercase__ , atol=1E-3 )
if are_identical:
logging.info('all weights are equal up to 1e-3' )
else:
raise ValueError('the weights are different. The new model is likely different from the original one.' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('saving the model\'s state dictionary' )
torch.save(
new_model.state_dict() , './bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--bertabs_checkpoint_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model.",
)
SCREAMING_SNAKE_CASE__ : Tuple = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
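# --- The verification pattern used above, factored out (a sketch):
import torch

def outputs_match(a: torch.Tensor, b: torch.Tensor, atol: float = 1e-3) -> bool:
    max_abs_diff = torch.max(torch.abs(a - b)).item()
    print(f"Maximum absolute difference between outputs: {max_abs_diff:.2e}")
    return torch.allclose(a, b, atol=atol)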
| 85 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : Dict = '''canine'''
def __init__( self , a_=768 , a_=12 , a_=12 , a_=3072 , a_="gelu" , a_=0.1 , a_=0.1 , a_=1_6384 , a_=16 , a_=0.02 , a_=1E-12 , a_=0 , a_=0Xe000 , a_=0Xe001 , a_=4 , a_=4 , a_=8 , a_=1_6384 , a_=128 , **a_ , ):
super().__init__(pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ )
lowerCamelCase_ : Dict = max_position_embeddings
lowerCamelCase_ : List[Any] = hidden_size
lowerCamelCase_ : str = num_hidden_layers
lowerCamelCase_ : Optional[int] = num_attention_heads
lowerCamelCase_ : List[str] = intermediate_size
lowerCamelCase_ : Tuple = hidden_act
lowerCamelCase_ : Any = hidden_dropout_prob
lowerCamelCase_ : Any = attention_probs_dropout_prob
lowerCamelCase_ : List[Any] = initializer_range
lowerCamelCase_ : Union[str, Any] = type_vocab_size
lowerCamelCase_ : List[str] = layer_norm_eps
# Character config:
lowerCamelCase_ : List[Any] = downsampling_rate
lowerCamelCase_ : Union[str, Any] = upsampling_kernel_size
lowerCamelCase_ : Optional[Any] = num_hash_functions
lowerCamelCase_ : str = num_hash_buckets
lowerCamelCase_ : int = local_transformer_stride
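# --- Usage sketch (hedged: assumes the standard `transformers` CANINE classes).
# CANINE operates on raw characters, so the 0xE000 / 0xE001 defaults above are
# Unicode private-use codepoints reserved for the BOS / EOS special tokens.
#
#     from transformers import CanineConfig, CanineModel
#
#     config = CanineConfig(num_hidden_layers=4)
#     model = CanineModel(config)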
| 73 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase ( self ):
lowerCamelCase_ : int = ["a", "b", "c"]
# Defaults to last layer if both are None
lowerCamelCase_ ,lowerCamelCase_ : Tuple = get_aligned_output_features_output_indices(a_ , a_ , a_ )
self.assertEqual(a_ , ["c"] )
self.assertEqual(a_ , [2] )
# Out indices set to match out features
lowerCamelCase_ ,lowerCamelCase_ : Optional[int] = get_aligned_output_features_output_indices(["a", "c"] , a_ , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [0, 2] )
# Out features set to match out indices
lowerCamelCase_ ,lowerCamelCase_ : Tuple = get_aligned_output_features_output_indices(a_ , [0, 2] , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [0, 2] )
# Out features selected from negative indices
lowerCamelCase_ ,lowerCamelCase_ : Dict = get_aligned_output_features_output_indices(a_ , [-3, -1] , a_ )
self.assertEqual(a_ , ["a", "c"] )
self.assertEqual(a_ , [-3, -1] )
def _UpperCamelCase ( self ):
# Stage names must be set
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 1) , a_ )
# Out features must be a list
with self.assertRaises(a_ ):
verify_out_features_out_indices(("a", "b") , (0, 1) , ["a", "b"] )
# Out features must be a subset of stage names
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 1) , ["a"] )
# Out indices must be a list or tuple
with self.assertRaises(a_ ):
verify_out_features_out_indices(a_ , 0 , ["a", "b"] )
# Out indices must be a subset of stage names
with self.assertRaises(a_ ):
verify_out_features_out_indices(a_ , (0, 1) , ["a"] )
# Out features and out indices must be the same length
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0,) , ["a", "b", "c"] )
# Out features should match out indices
with self.assertRaises(a_ ):
verify_out_features_out_indices(["a", "b"] , (0, 2) , ["a", "b", "c"] )
# Out features and out indices should be in order
with self.assertRaises(a_ ):
verify_out_features_out_indices(["b", "a"] , (0, 1) , ["a", "b"] )
# Check passes with valid inputs
verify_out_features_out_indices(["a", "b", "d"] , (0, 1, -1) , ["a", "b", "c", "d"] )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[Any] = BackboneMixin()
lowerCamelCase_ : List[Any] = ["a", "b", "c"]
lowerCamelCase_ : Optional[int] = ["a", "c"]
lowerCamelCase_ : Dict = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ["a", "c"] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
lowerCamelCase_ : Union[str, Any] = ["a", "b"]
self.assertEqual(backbone.out_features , ["a", "b"] )
self.assertEqual(backbone.out_indices , [0, 1] )
lowerCamelCase_ : str = [-3, -1]
self.assertEqual(backbone.out_features , ["a", "c"] )
self.assertEqual(backbone.out_indices , [-3, -1] )
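# --- Behaviour pinned down by the assertions above: when both `out_features`
# and `out_indices` are None, the helper defaults to the last stage; when
# exactly one is given, the other is derived so that stage_names[idx] == feature
# for every pair. Verification additionally enforces types, subset membership,
# equal lengths, alignment, and ascending order.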
| 73 | 1 |
import qiskit
def lowerCAmelCase_ ( __a , __a ) -> qiskit.result.counts.Counts:
"""simple docstring"""
lowerCamelCase__: List[Any] =qiskit.Aer.get_backend("aer_simulator" )
lowerCamelCase__: int =qiskit.QuantumCircuit(4 , 2 )
# encode inputs in qubits 0 and 1
if bita == 1:
qc_ha.x(0 )
if bita == 1:
qc_ha.x(1 )
qc_ha.barrier()
# use cnots to write XOR of the inputs on qubit2
qc_ha.cx(0 , 2 )
qc_ha.cx(1 , 2 )
# use ccx / toffoli gate to write AND of the inputs on qubit3
qc_ha.ccx(0 , 1 , 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2 , 0 ) # extract XOR value
qc_ha.measure(3 , 1 ) # extract AND value
# Execute the circuit on the qasm simulator
lowerCamelCase__: List[str] =qiskit.execute(_a , _a , shots=1000 )
# Return the histogram data of the results of the experiment
return job.result().get_counts(_a )
if __name__ == "__main__":
__A = half_adder(1, 1)
print(f'Half Adder Output Qubit Counts: {counts}')
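# --- The classical truth table the circuit implements (a sketch). With inputs
# (1, 1), XOR lands on classical bit 0 and AND on classical bit 1, so every shot
# should read "10" (Qiskit prints classical bit 0 rightmost).
def half_adder_classical(bit_a: int, bit_b: int) -> tuple[int, int]:
    return bit_a ^ bit_b, bit_a & bit_b  # (sum, carry)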
| 59 |
"""simple docstring"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_snake_case = logging.getLogger(__name__)
_snake_case = "Hello world! cécé herlolip"
_snake_case = namedtuple(
"BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def snake_case ( _a: Union[str, Any] , _a: Dict )-> List[str]:
'''simple docstring'''
lowerCamelCase__ = BertAbsConfig(
temp_dir='.' , finetune_bert=_a , large=_a , share_emb=_a , use_bert_emb=_a , encoder='bert' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
lowerCamelCase__ = torch.load(_a , lambda _a , _a : storage )
lowerCamelCase__ = AbsSummarizer(_a , torch.device('cpu' ) , _a )
original.eval()
lowerCamelCase__ = BertAbsSummarizer(_a , torch.device('cpu' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('convert the model' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outputs are identical
# ----------------------------------
logging.info('Make sure that the models\' outputs are identical' )
lowerCamelCase__ = BertTokenizer.from_pretrained('bert-base-uncased' )
# prepare the model inputs
lowerCamelCase__ = tokenizer.encode('This is sample éàalj\'-.' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_a )) )
lowerCamelCase__ = torch.tensor(_a ).unsqueeze(0 )
lowerCamelCase__ = tokenizer.encode('This is sample 3 éàalj\'-.' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_a )) )
lowerCamelCase__ = torch.tensor(_a ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
lowerCamelCase__ = encoder_input_ids
lowerCamelCase__ = decoder_input_ids
lowerCamelCase__ = lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = lowerCamelCase__ = None
lowerCamelCase__ = lowerCamelCase__ = None
lowerCamelCase__ = None
# The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
lowerCamelCase__ = original(_a , _a , _a , _a , _a , _a , _a )[0]
lowerCamelCase__ = original.generator(_a )
lowerCamelCase__ = new_model(
_a , _a , _a , _a , _a )[0]
lowerCamelCase__ = new_model.generator(_a )
lowerCamelCase__ = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print('Maximum absolute difference between outputs: {:.2f}'.format(_a ) )
lowerCamelCase__ = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print('Maximum absolute difference between outputs: {:.2f}'.format(_a ) )
lowerCamelCase__ = torch.allclose(_a , _a , atol=1E-3 )
if are_identical:
logging.info('all weights are equal up to 1e-3' )
else:
raise ValueError('the weights are different. The new model is likely different from the original one.' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('saving the model\'s state dictionary' )
torch.save(
new_model.state_dict() , './bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin' )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument(
"--bertabs_checkpoint_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model.",
)
_snake_case = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 510 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : List[Any] = ['''image_processor''', '''tokenizer''']
UpperCamelCase_ : Union[str, Any] = '''ViTImageProcessor'''
UpperCamelCase_ : Optional[Any] = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : List[Any]=None , **UpperCAmelCase_ : Optional[int] ):
SCREAMING_SNAKE_CASE : str = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , UpperCAmelCase_ , )
SCREAMING_SNAKE_CASE : List[str] = kwargs.pop("feature_extractor" )
SCREAMING_SNAKE_CASE : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(UpperCAmelCase_ , UpperCAmelCase_ )
def __call__( self : int , UpperCAmelCase_ : int=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Union[str, Any]=None , **UpperCAmelCase_ : Any ):
if text is None and visual_prompt is None and images is None:
raise ValueError("You have to specify either text, visual prompt or images." )
if text is not None and visual_prompt is not None:
raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." )
if text is not None:
SCREAMING_SNAKE_CASE : Any = self.tokenizer(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )
if visual_prompt is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )
if images is not None:
SCREAMING_SNAKE_CASE : Tuple = self.image_processor(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )
if visual_prompt is not None and images is not None:
SCREAMING_SNAKE_CASE : Optional[int] = {
"pixel_values": image_features.pixel_values,
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
SCREAMING_SNAKE_CASE : Optional[int] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = {
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**UpperCAmelCase_ ) , tensor_type=UpperCAmelCase_ )
def _A ( self : Optional[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : List[Any] ):
return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
def _A ( self : Tuple , *UpperCAmelCase_ : int , **UpperCAmelCase_ : int ):
return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
@property
def _A ( self : Optional[int] ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCAmelCase_ , )
return self.image_processor_class
@property
def _A ( self : List[str] ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , UpperCAmelCase_ , )
return self.image_processor
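# --- Dispatch rules implemented by __call__ above, as a quick reference
# (`processor` and `img` are illustrative):
#   processor(text=["a cat"], images=img)         -> tokenizer output plus pixel_values
#   processor(visual_prompt=img, images=img)      -> pixel_values and conditional_pixel_values
#   processor(visual_prompt=img)                  -> conditional_pixel_values only
#   processor(text=["a cat"], visual_prompt=img)  -> ValueError (exactly one prompt type)
#   processor()                                   -> ValueError (nothing to encode)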
| 488 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : str = '''segformer'''
def __init__( self : List[Any] , UpperCAmelCase_ : int=3 , UpperCAmelCase_ : Optional[Any]=4 , UpperCAmelCase_ : List[str]=[2, 2, 2, 2] , UpperCAmelCase_ : Optional[int]=[8, 4, 2, 1] , UpperCAmelCase_ : Union[str, Any]=[32, 64, 160, 256] , UpperCAmelCase_ : int=[7, 3, 3, 3] , UpperCAmelCase_ : str=[4, 2, 2, 2] , UpperCAmelCase_ : List[str]=[1, 2, 5, 8] , UpperCAmelCase_ : List[Any]=[4, 4, 4, 4] , UpperCAmelCase_ : Dict="gelu" , UpperCAmelCase_ : Optional[Any]=0.0 , UpperCAmelCase_ : Optional[Any]=0.0 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : List[Any]=0.02 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Optional[Any]=1E-6 , UpperCAmelCase_ : List[str]=256 , UpperCAmelCase_ : Dict=255 , **UpperCAmelCase_ : Dict , ):
super().__init__(**UpperCAmelCase_ )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
" removed, as the behaviour will default to that of reshape_last_stage = True." , UpperCAmelCase_ , )
SCREAMING_SNAKE_CASE : Dict = num_channels
SCREAMING_SNAKE_CASE : Any = num_encoder_blocks
SCREAMING_SNAKE_CASE : Tuple = depths
SCREAMING_SNAKE_CASE : Any = sr_ratios
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_sizes
SCREAMING_SNAKE_CASE : int = patch_sizes
SCREAMING_SNAKE_CASE : Optional[int] = strides
SCREAMING_SNAKE_CASE : Tuple = mlp_ratios
SCREAMING_SNAKE_CASE : Any = num_attention_heads
SCREAMING_SNAKE_CASE : int = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = classifier_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : Dict = drop_path_rate
SCREAMING_SNAKE_CASE : Any = layer_norm_eps
SCREAMING_SNAKE_CASE : Any = decoder_hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs.get("reshape_last_stage" , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = semantic_loss_ignore_index
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Dict = version.parse('''1.11''' )
@property
def _A ( self : Optional[Any] ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _A ( self : List[str] ):
return 1E-4
@property
def _A ( self : Any ):
return 12
| 488 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
_A = None
_A = logging.get_logger(__name__)
_A = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
_A = {
"""vocab_file""": {
"""facebook/mbart-large-en-ro""": (
"""https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"""
),
"""facebook/mbart-large-cc25""": (
"""https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""",
"""facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""",
},
}
_A = {
"""facebook/mbart-large-en-ro""": 1024,
"""facebook/mbart-large-cc25""": 1024,
}
# fmt: off
_A = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""]
class _lowerCAmelCase ( UpperCamelCase__ ):
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = ['input_ids', 'attention_mask']
lowerCamelCase__ = MBartTokenizer
lowerCamelCase__ = []
lowerCamelCase__ = []
def __init__( self , snake_case_=None , snake_case_=None , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_="<mask>" , snake_case_=None , snake_case_=None , snake_case_=None , **snake_case_ , ) -> str:
# Mask token behaves like a normal word, i.e. includes the space before it
SCREAMING_SNAKE_CASE : List[str] =AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else mask_token
super().__init__(
vocab_file=snake_case_ , tokenizer_file=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , src_lang=snake_case_ , tgt_lang=snake_case_ , additional_special_tokens=snake_case_ , **snake_case_ , )
SCREAMING_SNAKE_CASE : Union[str, Any] =vocab_file
SCREAMING_SNAKE_CASE : Union[str, Any] =False if not self.vocab_file else True
SCREAMING_SNAKE_CASE : int =FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
SCREAMING_SNAKE_CASE : List[Any] ={
lang_code: self.convert_tokens_to_ids(snake_case_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
SCREAMING_SNAKE_CASE : List[Any] =src_lang if src_lang is not None else '''en_XX'''
SCREAMING_SNAKE_CASE : Optional[Any] =self.convert_tokens_to_ids(self._src_lang )
SCREAMING_SNAKE_CASE : List[str] =tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch(self, src_texts, src_lang="en_XX", tgt_texts=None, tgt_lang="ro_RO", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source language setting: no prefix, suffix = [eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, tgt_lang) -> None:
        """Reset the special tokens to the target language setting: no prefix, suffix = [eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(tgt_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 258 |
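A hedged usage sketch for the tokenizer above — the checkpoint name comes from the pretrained map in this file; network access and a recent transformers release are assumed, and exact ids vary by version:

from transformers import MBartTokenizerFast

tokenizer = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
# input_ids end with [</s>, en_XX] because set_src_lang_special_tokens puts the
# language code in suffix_tokens rather than as a prefix.
print(tokenizer.convert_ids_to_tokens(batch["input_ids"][0][-2:].tolist()))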
def encrypt(input_string: str, key: int) -> str:
    """Shuffles the characters of a string by placing each of them in a grid
    (whose height depends on the key) in a zigzag formation and reading it row by row."""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)
    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Generates the zigzag template from the key, fills it with the characters
    of the input string, and reads the grid back in zigzag order."""
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(row)
    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Uses decrypt with every possible key and collects the candidate plaintexts."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
| 258 | 1 |
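A quick round-trip check of the cipher above (plain Python, no dependencies; values chosen for illustration):

message = "Hello, World!"
ciphertext = encrypt(message, 4)
assert decrypt(ciphertext, 4) == message
# bruteforce tries every key, so the correct plaintext shows up at key 4
assert bruteforce(ciphertext)[4] == message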
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1
@dataclass
class A:
    x: int
    y: str
class PyUtilsTest(TestCase):
    def test_map_nested(self):
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {"a": 1, "b": 2}
        s6 = {"a": [1, 2], "b": [3, 4]}
        s7 = {"a": {"1": 1}, "b": 2}
        s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {"a": 2, "b": 3}
        expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)
        num_proc = 2
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)
        sn = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn, map_numpy=True).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn_int.items()},
        )
        self.assertEqual(map_nested(np_sum, sn, num_proc=num_proc), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn, map_numpy=True, num_proc=num_proc).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn_int.items()},
        )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn, num_proc=num_proc)
    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)
    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
'''iterable_length, num_proc, expected_num_proc''' , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_temp_seed_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
    @require_torch
    def test_temp_seed_pytorch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
    def test_temp_seed_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("input_data", [{}])
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
'''data, expected_output''' , [
({}, []),
([], []),
('''foo''', ['''foo''']),
(['''foo''', '''bar'''], ['''foo''', '''bar''']),
([['''foo''', '''bar''']], ['''foo''', '''bar''']),
([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']),
([[['''foo'''], '''bar''']], ['''foo''', '''bar''']),
({'''a''': 1, '''b''': 2}, [1, 2]),
({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]),
({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]),
] , )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output
    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output
    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])
def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)
def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20
    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
| 343 |
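For orientation, a minimal sketch of the map_nested behaviour the tests above exercise (assuming a recent datasets release; keyword names may differ between versions):

from datasets.utils.py_utils import map_nested

def add_one(i):
    return i + 1

print(map_nested(add_one, {"a": [1, 2], "b": {"c": 3}}))  # {'a': [2, 3], 'b': {'c': 4}}
# With num_proc set, top-level items are spread over worker processes once the
# structure reaches parallel_min_length, which is what the mocked test checks.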
'''simple docstring'''
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")
    state_dict = chkpt["model"]
    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v
    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}
    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")
    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 343 | 1 |
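For reference, the converter above is meant to be driven from the command line; calling it directly is equivalent (both paths are placeholders, and the input must be an original XLM dump):

convert_xlm_checkpoint_to_pytorch("mlm_en_2048.pth", "./xlm-pytorch-dump")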
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation count link text for a Google Scholar lookup."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
"""title""": (
"""Precisely geometry controlled microsupercapacitors for ultrahigh areal """
"""capacitance, volumetric capacitance, and energy density"""
),
"""journal""": """Chem. Mater.""",
"""volume""": 30,
"""pages""": """3979-3990""",
"""year""": 2_018,
"""hl""": """en""",
}
print(get_citation("""https://scholar.google.com/scholar_lookup""", params=params))
| 62 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
lowerCAmelCase__ = 'true'
def __lowercase ( _UpperCAmelCase , _UpperCAmelCase=82 , _UpperCAmelCase=16 ) -> int:
'''simple docstring'''
set_seed(42 )
__lowercase = RegressionModel()
__lowercase = deepcopy(_UpperCAmelCase )
__lowercase = RegressionDataset(length=_UpperCAmelCase )
__lowercase = DataLoader(_UpperCAmelCase , batch_size=_UpperCAmelCase )
model.to(accelerator.device )
__lowercase , __lowercase = accelerator.prepare(_UpperCAmelCase , _UpperCAmelCase )
return model, ddp_model, dataloader
def __lowercase ( _UpperCAmelCase , _UpperCAmelCase=False ) -> int:
'''simple docstring'''
__lowercase = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" )
__lowercase = load_dataset("glue" , "mrpc" , split="validation" )
def tokenize_function(_UpperCAmelCase ):
__lowercase = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase )
return outputs
with accelerator.main_process_first():
__lowercase = dataset.map(
_UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=["idx", "sentence1", "sentence2"] , )
__lowercase = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(_UpperCAmelCase ):
if use_longest:
return tokenizer.pad(_UpperCAmelCase , padding="longest" , return_tensors="pt" )
return tokenizer.pad(_UpperCAmelCase , padding="max_length" , max_length=128 , return_tensors="pt" )
return DataLoader(_UpperCAmelCase , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=16 )
def __lowercase ( _UpperCAmelCase , _UpperCAmelCase ) -> Tuple:
'''simple docstring'''
__lowercase = Accelerator(dispatch_batches=_UpperCAmelCase , split_batches=_UpperCAmelCase )
__lowercase = get_dataloader(_UpperCAmelCase , not dispatch_batches )
__lowercase = AutoModelForSequenceClassification.from_pretrained(
"hf-internal-testing/mrpc-bert-base-cased" , return_dict=_UpperCAmelCase )
__lowercase , __lowercase = accelerator.prepare(_UpperCAmelCase , _UpperCAmelCase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def __lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple:
'''simple docstring'''
__lowercase = []
for batch in dataloader:
__lowercase , __lowercase = batch.values()
with torch.no_grad():
__lowercase = model(_UpperCAmelCase )
__lowercase , __lowercase = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
__lowercase , __lowercase = [], []
for logit, targ in logits_and_targets:
logits.append(_UpperCAmelCase )
targs.append(_UpperCAmelCase )
__lowercase , __lowercase = torch.cat(_UpperCAmelCase ), torch.cat(_UpperCAmelCase )
return logits, targs
def __lowercase ( _UpperCAmelCase , _UpperCAmelCase=82 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=16 ) -> Any:
'''simple docstring'''
__lowercase , __lowercase , __lowercase = get_basic_setup(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase , __lowercase = generate_predictions(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
assert (
len(_UpperCAmelCase ) == num_samples
), f'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(_UpperCAmelCase )}'''
def __lowercase ( _UpperCAmelCase = False , _UpperCAmelCase = False ) -> str:
'''simple docstring'''
__lowercase = evaluate.load("glue" , "mrpc" )
__lowercase , __lowercase = get_mrpc_setup(_UpperCAmelCase , _UpperCAmelCase )
# First do baseline
__lowercase , __lowercase , __lowercase = setup["no"]
model.to(_UpperCAmelCase )
model.eval()
for batch in dataloader:
batch.to(_UpperCAmelCase )
with torch.inference_mode():
__lowercase = model(**_UpperCAmelCase )
__lowercase = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=_UpperCAmelCase , references=batch["labels"] )
__lowercase = metric.compute()
# Then do distributed
__lowercase , __lowercase , __lowercase = setup["ddp"]
model.eval()
for batch in dataloader:
with torch.inference_mode():
__lowercase = model(**_UpperCAmelCase )
__lowercase = outputs.logits.argmax(dim=-1 )
__lowercase = batch["labels"]
__lowercase , __lowercase = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=_UpperCAmelCase , references=_UpperCAmelCase )
__lowercase = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), f'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def __lowercase ( ) -> Dict:
'''simple docstring'''
__lowercase = Accelerator(split_batches=_UpperCAmelCase , dispatch_batches=_UpperCAmelCase )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print("**Testing gather_for_metrics**" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(_UpperCAmelCase , _UpperCAmelCase )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("**Test torch metrics**" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
__lowercase = Accelerator(split_batches=_UpperCAmelCase , dispatch_batches=_UpperCAmelCase )
if accelerator.is_local_main_process:
print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(_UpperCAmelCase , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("**Test last batch is not dropped when perfectly divisible**" )
__lowercase = Accelerator()
test_torch_metrics(_UpperCAmelCase , 512 )
accelerator.state._reset_state()
def __lowercase ( _UpperCAmelCase ) -> List[str]:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 321 | 0 |
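The core pattern the script above stress-tests, reduced to a hedged sketch (accelerate installed; model, dataloader and metric stand in for any prepared objects and are assumed to be defined):

import torch
from accelerate import Accelerator

accelerator = Accelerator()
model, dataloader = accelerator.prepare(model, dataloader)  # assumed built earlier
model.eval()
for batch in dataloader:
    with torch.inference_mode():
        logits = model(**batch).logits
    preds = logits.argmax(dim=-1)
    # gather_for_metrics collects tensors from every process and drops the duplicated
    # samples that pad the last batch, so metric counts stay exact.
    preds, refs = accelerator.gather_for_metrics((preds, batch["labels"]))
    metric.add_batch(predictions=preds, references=refs)  # `metric` assumed defined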
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split()
        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)
    def test_trainer_tpu(self):
        import xla_spawn

        testargs = '\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n '.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
| 708 |
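A hedged sketch of the argv contract these tests rely on: xla_spawn consumes --num_cores plus a training-script path and replays the remaining arguments into that script on each spawned TPU core (paths and flags below are placeholders):

import sys
from unittest.mock import patch

import xla_spawn  # lives in the transformers examples tree

args = "xla_spawn.py --num_cores=8 ./run_glue.py --model_name_or_path distilbert-base-uncased --do_train".split()
with patch.object(sys, "argv", args):
    xla_spawn.main()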
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the step functions of the variance-exploding SDE scheduler."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """Variance-exploding stochastic differential equation (SDE) scheduler."""

    order = 1
@register_to_config
    def __init__(self, num_train_timesteps: int = 2000, snr: float = 0.15, sigma_min: float = 0.01,
                 sigma_max: float = 1348.0, sampling_eps: float = 1e-5, correct_steps: int = 1):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        # No input scaling is required for this scheduler; kept for API compatibility.
        return sample
    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device=None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None,
                   sampling_eps: float = None):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )
    def step_pred(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor,
                  generator=None, return_dict: bool = True):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )
        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)
        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)
    def step_correct(self, model_output: torch.FloatTensor, sample: torch.FloatTensor,
                     generator=None, return_dict: bool = True):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)
        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples: torch.FloatTensor, noise: torch.FloatTensor,
                  timesteps: torch.FloatTensor) -> torch.FloatTensor:
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
| 154 | 0 |
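A hedged sketch of how a variance-exploding scheduler like the one above is driven at sampling time, mirroring the corrector/predictor loop of the score-SDE formulation (`model` is any score network and is assumed to be defined; shapes, step counts and the sigma conditioning are illustrative):

import torch

scheduler = ScoreSdeVeScheduler()
scheduler.set_timesteps(num_inference_steps=100)
scheduler.set_sigmas(num_inference_steps=100)
sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
for i, t in enumerate(scheduler.timesteps):
    sigma_t = scheduler.sigmas[i] * torch.ones(sample.shape[0])
    for _ in range(scheduler.config.correct_steps):  # corrector (Langevin) steps
        model_output = model(sample, sigma_t).sample  # `model` assumed defined
        sample = scheduler.step_correct(model_output, sample).prev_sample
    model_output = model(sample, sigma_t).sample
    sample = scheduler.step_pred(model_output, t, sample).prev_sample  # predictor step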
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class _A ( UpperCAmelCase_ ):
def __init__( self : Union[str, Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : str=13 , lowerCamelCase__ : List[Any]=7 , lowerCamelCase__ : Tuple=True , lowerCamelCase__ : Any=True , lowerCamelCase__ : str=True , lowerCamelCase__ : Tuple=True , lowerCamelCase__ : Union[str, Any]=True , lowerCamelCase__ : Union[str, Any]=False , lowerCamelCase__ : Optional[Any]=False , lowerCamelCase__ : int=False , lowerCamelCase__ : str=2 , lowerCamelCase__ : Dict=99 , lowerCamelCase__ : Dict=0 , lowerCamelCase__ : Dict=32 , lowerCamelCase__ : str=5 , lowerCamelCase__ : Optional[Any]=4 , lowerCamelCase__ : Optional[int]=0.1 , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : Any=5_12 , lowerCamelCase__ : int=12 , lowerCamelCase__ : Optional[int]=2 , lowerCamelCase__ : Tuple=0.02 , lowerCamelCase__ : Optional[Any]=3 , lowerCamelCase__ : Any=4 , lowerCamelCase__ : List[str]="last" , lowerCamelCase__ : Tuple=None , lowerCamelCase__ : str=None , ):
"""simple docstring"""
__UpperCamelCase : Tuple = parent
__UpperCamelCase : Optional[Any] = batch_size
__UpperCamelCase : Optional[Any] = seq_length
__UpperCamelCase : List[str] = is_training
__UpperCamelCase : List[Any] = use_input_lengths
__UpperCamelCase : int = use_token_type_ids
__UpperCamelCase : List[str] = use_labels
__UpperCamelCase : str = gelu_activation
__UpperCamelCase : Tuple = sinusoidal_embeddings
__UpperCamelCase : Tuple = causal
__UpperCamelCase : Optional[Any] = asm
__UpperCamelCase : str = n_langs
__UpperCamelCase : List[Any] = vocab_size
__UpperCamelCase : Tuple = n_special
__UpperCamelCase : Optional[int] = hidden_size
__UpperCamelCase : Any = num_hidden_layers
__UpperCamelCase : str = num_attention_heads
__UpperCamelCase : Union[str, Any] = hidden_dropout_prob
__UpperCamelCase : Union[str, Any] = attention_probs_dropout_prob
__UpperCamelCase : Tuple = max_position_embeddings
__UpperCamelCase : List[Any] = type_vocab_size
__UpperCamelCase : Tuple = type_sequence_label_size
__UpperCamelCase : Optional[Any] = initializer_range
__UpperCamelCase : List[str] = num_labels
__UpperCamelCase : Any = num_choices
__UpperCamelCase : Optional[Any] = summary_type
__UpperCamelCase : int = use_proj
__UpperCamelCase : Union[str, Any] = scope
def a ( self : Dict ):
"""simple docstring"""
__UpperCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase : List[str] = None
if self.use_input_lengths:
__UpperCamelCase : str = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__UpperCamelCase : Optional[int] = None
if self.use_token_type_ids:
__UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__UpperCamelCase : List[str] = None
__UpperCamelCase : Dict = None
__UpperCamelCase : Dict = None
if self.use_labels:
__UpperCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase : Tuple = ids_tensor([self.batch_size] , 2 ).float()
__UpperCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase : str = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def a ( self : Optional[Any] ):
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def a ( self : Optional[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : Dict , lowerCamelCase__ : int , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : str , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Union[str, Any] , ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] = FlaubertModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__UpperCamelCase : Union[str, Any] = model(lowerCamelCase__ , lengths=lowerCamelCase__ , langs=lowerCamelCase__ )
__UpperCamelCase : int = model(lowerCamelCase__ , langs=lowerCamelCase__ )
__UpperCamelCase : Tuple = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a ( self : Optional[int] , lowerCamelCase__ : int , lowerCamelCase__ : List[str] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : List[str] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : str , ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] = FlaubertWithLMHeadModel(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__UpperCamelCase : Dict = model(lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a ( self : List[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Any , ):
"""simple docstring"""
__UpperCamelCase : int = FlaubertForQuestionAnsweringSimple(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__UpperCamelCase : int = model(lowerCamelCase__ )
__UpperCamelCase : Optional[Any] = model(lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a ( self : int , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : str , lowerCamelCase__ : Dict , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Tuple , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int , ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] = FlaubertForQuestionAnswering(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__UpperCamelCase : Optional[Any] = model(lowerCamelCase__ )
__UpperCamelCase : Optional[int] = model(
lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ , cls_index=lowerCamelCase__ , is_impossible=lowerCamelCase__ , p_mask=lowerCamelCase__ , )
__UpperCamelCase : List[Any] = model(
lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ , cls_index=lowerCamelCase__ , is_impossible=lowerCamelCase__ , )
((__UpperCamelCase) , ) : Any = result_with_labels.to_tuple()
__UpperCamelCase : List[Any] = model(lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ )
((__UpperCamelCase) , ) : List[str] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def a ( self : List[Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Tuple , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : int , lowerCamelCase__ : Dict , lowerCamelCase__ : str , ):
"""simple docstring"""
__UpperCamelCase : Optional[int] = FlaubertForSequenceClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__UpperCamelCase : Any = model(lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a ( self : Tuple , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Any , lowerCamelCase__ : Dict , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : str , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Optional[Any] , ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] = self.num_labels
__UpperCamelCase : str = FlaubertForTokenClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__UpperCamelCase : List[str] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a ( self : Tuple , lowerCamelCase__ : Any , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : Any , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : int , lowerCamelCase__ : Any , lowerCamelCase__ : Tuple , lowerCamelCase__ : List[Any] , ):
"""simple docstring"""
__UpperCamelCase : Tuple = self.num_choices
__UpperCamelCase : Tuple = FlaubertForMultipleChoice(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__UpperCamelCase : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCamelCase : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCamelCase : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCamelCase : int = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a ( self : Union[str, Any] ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""lengths""": input_lengths,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class _A ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
lowercase_ : int = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
lowercase_ : List[str] = (
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def a ( self : Optional[int] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : str ):
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def a ( self : Union[str, Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Dict=False ):
"""simple docstring"""
__UpperCamelCase : int = super()._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
__UpperCamelCase : Tuple = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase__ )
__UpperCamelCase : Tuple = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase__ )
return inputs_dict
def a ( self : str ):
"""simple docstring"""
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
def a ( self : Union[str, Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def a ( self : Optional[Any] ):
"""simple docstring"""
__UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowerCamelCase__ )
def a ( self : Any ):
"""simple docstring"""
__UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowerCamelCase__ )
def a ( self : Optional[Any] ):
"""simple docstring"""
__UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*lowerCamelCase__ )
def a ( self : Union[str, Any] ):
"""simple docstring"""
__UpperCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowerCamelCase__ )
def a ( self : Union[str, Any] ):
"""simple docstring"""
__UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCamelCase__ )
def a ( self : Tuple ):
"""simple docstring"""
__UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*lowerCamelCase__ )
def a ( self : Optional[int] ):
"""simple docstring"""
__UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*lowerCamelCase__ )
@slow
def a ( self : Optional[int] ):
"""simple docstring"""
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase : Union[str, Any] = FlaubertModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@slow
@require_torch_gpu
def a ( self : Optional[Any] ):
"""simple docstring"""
__UpperCamelCase , __UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
__UpperCamelCase : int = True
__UpperCamelCase : Any = model_class(config=lowerCamelCase__ )
__UpperCamelCase : List[str] = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : Tuple = torch.jit.trace(
lowerCamelCase__ , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowerCamelCase__ , os.path.join(lowerCamelCase__ , """traced_model.pt""" ) )
__UpperCamelCase : Optional[int] = torch.jit.load(os.path.join(lowerCamelCase__ , """traced_model.pt""" ) , map_location=lowerCamelCase__ )
loaded(inputs_dict["""input_ids"""].to(lowerCamelCase__ ) , inputs_dict["""attention_mask"""].to(lowerCamelCase__ ) )
@require_torch
class _A ( unittest.TestCase ):
@slow
def a ( self : str ):
"""simple docstring"""
__UpperCamelCase : Dict = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" )
__UpperCamelCase : List[str] = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
with torch.no_grad():
__UpperCamelCase : List[Any] = model(lowerCamelCase__ )[0]
__UpperCamelCase : Tuple = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , lowerCamelCase__ )
__UpperCamelCase : List[Any] = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
| 269 |
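For orientation, the integration test at the end of the file boils down to this inference snippet (network access to the flaubert/flaubert_base_cased checkpoint assumed):

import torch
from transformers import FlaubertModel, FlaubertTokenizer

tokenizer = FlaubertTokenizer.from_pretrained("flaubert/flaubert_base_cased")
model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
inputs = tokenizer("Le chat mange une pomme.", return_tensors="pt")
with torch.no_grad():
    hidden_states = model(**inputs)[0]  # shape: (batch, seq_len, 768)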
from __future__ import annotations


class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list: list) -> Node:
    """Creates a linked list from the given sequence and returns its head."""
    if not elements_list:
        raise Exception("The Elements List is empty")
    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node) -> None:
    """Prints the elements of the linked list in reverse order (recursively)."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
| 269 | 1 |
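The recursive print_reverse above makes one call per node, so very long lists can hit Python's recursion limit; an iterative variant (a sketch, not part of the original file) trades that for an explicit buffer:

def print_reverse_iterative(head_node: Node) -> None:
    # Walk the list once collecting values, then print them back to front.
    values = []
    while head_node:
        values.append(head_node.data)
        head_node = head_node.next
    for value in reversed(values):
        print(value)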
"""simple docstring"""
import math
def SCREAMING_SNAKE_CASE ( snake_case, snake_case = 0, snake_case = 0):
__snake_case = end or len(snake_case)
for i in range(snake_case, snake_case):
__snake_case = i
__snake_case = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
__snake_case = array[temp_index - 1]
temp_index -= 1
__snake_case = temp_index_value
return array
def SCREAMING_SNAKE_CASE ( snake_case, snake_case, snake_case): # Max Heap
__snake_case = index
__snake_case = 2 * index + 1 # Left Node
__snake_case = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
__snake_case = left_index
if right_index < heap_size and array[largest] < array[right_index]:
__snake_case = right_index
if largest != index:
__snake_case , __snake_case = array[largest], array[index]
heapify(snake_case, snake_case, snake_case)
def SCREAMING_SNAKE_CASE ( snake_case):
__snake_case = len(snake_case)
for i in range(n // 2, -1, -1):
heapify(snake_case, snake_case, snake_case)
for i in range(n - 1, 0, -1):
__snake_case , __snake_case = array[0], array[i]
heapify(snake_case, 0, snake_case)
return array
def SCREAMING_SNAKE_CASE ( snake_case, snake_case, snake_case, snake_case):
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def SCREAMING_SNAKE_CASE ( snake_case, snake_case, snake_case, snake_case):
__snake_case = low
__snake_case = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
__snake_case , __snake_case = array[j], array[i]
i += 1
def SCREAMING_SNAKE_CASE ( snake_case):
if len(snake_case) == 0:
return array
__snake_case = 2 * math.ceil(math.loga(len(snake_case)))
__snake_case = 16
return intro_sort(snake_case, 0, len(snake_case), snake_case, snake_case)
def SCREAMING_SNAKE_CASE ( snake_case, snake_case, snake_case, snake_case, snake_case):
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(snake_case)
max_depth -= 1
__snake_case = median_of_a(snake_case, snake_case, start + ((end - start) // 2) + 1, end - 1)
__snake_case = partition(snake_case, snake_case, snake_case, snake_case)
intro_sort(snake_case, snake_case, snake_case, snake_case, snake_case)
__snake_case = p
return insertion_sort(snake_case, snake_case, snake_case)
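A small illustrative self-check for the hybrid sort above (insertion sort below the size threshold, heap sort once the depth budget is spent, quicksort otherwise):

example = [5, 3, 8, 1, 9, 2, 7, 4, 6, 0]
assert sort(example) == list(range(10))
assert sort([]) == []
assert sort([-2.5, 0.0, -1.5]) == [-2.5, -1.5, 0.0]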
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
| 93 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self) -> None:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1(self) -> None:
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_euler")
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_2(self) -> None:
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_euler")
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1
    def test_stable_diffusion_karras_sigmas(self) -> None:
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_dpmpp_2m")
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=7.5, num_inference_steps=15, output_type="np", use_karras_sigmas=True,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 | 93 | 1 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values
    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )
    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )
    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False
    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)
                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)
                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")
        outputs = model(**inputs)
        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 298 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class FlaxStableDiffusionControlNetPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
class FlaxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
class FlaxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
class FlaxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
| 535 | 0 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def euler_modified(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    '''simple docstring'''
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y_get = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_get))
        )
        x += step_size
    return y
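# Worked example (added for illustration): for y' = y with y(0) = 1 and step
# 0.1, one predictor-corrector step of this modified Euler (Heun) method gives
# y(0.1) ~ 1 + 0.05 * (1.0 + 1.1) = 1.105, close to the exact e**0.1 ~ 1.10517:
#     euler_modified(lambda x, y: y, 1.0, 0.0, 0.1, 0.1)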
if __name__ == "__main__":
import doctest
doctest.testmod()
| 718 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 553 | 0 |
'''simple docstring'''
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []
    def get_position(self, vertex):
        return self.node_position[vertex]
    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos
    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, temp1 = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, temp1
            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start]))
            self.set_position(positions[start], temp)
            self.top_to_bottom(heap, smallest_child, size, positions)
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)
    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)
    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    heap = Heap()
    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)
    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges
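# Example (a sketch, not part of the original module): for the triangle graph
# 0-1 (w=1), 1-2 (w=2), 0-2 (w=3), starting from vertex 0, the MST should be
# [(0, 1), (1, 2)]:
#     demo = defaultdict(list)
#     for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
#         demo[u].append([v, w])
#         demo[v].append([u, w])
#     assert prisms_algorithm(demo) == [(0, 1), (1, 2)]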
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 330 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
    GPT2TokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()
    @require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained("gpt2")
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained("gpt2")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_one_file(self):
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb") as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f)
            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)
        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json"):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json", "wb") as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1_000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json")
    def test_legacy_load_from_url(self):
        _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model")
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-tokenizer")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)
        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)
        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)
        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)
        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)
        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")
        tokenizer = AutoTokenizer.from_pretrained(
            f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        trie.data
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})
    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])
    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])
    def test_trie_final(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])
    def test_trie_subtokens(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])
    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])
    def test_trie_skip(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])
    def test_cut_text_hardening(self):
        # Even with malformed offsets we still output correct string parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
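    # Illustrative note (added, not one of the original cases): Trie.split
    # resolves overlapping tokens in favor of the earliest-starting, longest
    # match, so with tokens {"AB", "B"} the string "ABC" splits as ["AB", "C"]
    # rather than starting a later match at "B".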
| 393 | 0 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )
    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
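# Usage sketch (added; the file name is hypothetical). This reader is what
# backs `datasets.Dataset.from_text`; each line of the file becomes one
# example under a "text" column:
#     dataset = TextDatasetReader("corpus.txt", split="train").read()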
| 170 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)
class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder
    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer
        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer")
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer")
        return cls(question_encoder=question_encoder, generator=generator)
    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)
    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)
    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder
    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator
    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
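# Usage sketch (added; "facebook/rag-token-base" is a public RAG checkpoint):
#     tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
#     inputs = tokenizer("who holds the record in 100m freestyle?", return_tensors="pt")
# __call__ routes through `current_tokenizer` (the question-encoder tokenizer
# by default), while decode()/batch_decode() always use the generator's.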
| 170 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return len(self.sp_model)
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
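    # Note (added comment): SentencePiece may emit pieces like "9," with a
    # digit glued to a trailing comma. The branch above re-encodes such pieces
    # without the comma so that the digit(s) and "," come back as separate
    # tokens, matching XLNet's original preprocessing.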
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)
    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,) | 322 |
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
"""simple docstring"""
if (inductance, frequency, reactance).count(0) != 1:
raise ValueError("""One and only one argument must be 0""")
if inductance < 0:
raise ValueError("""Inductance cannot be negative""")
if frequency < 0:
raise ValueError("""Frequency cannot be negative""")
if reactance < 0:
raise ValueError("""Inductive reactance cannot be negative""")
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("""Exactly one argument must be 0""")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 648 | 0 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)
class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0
    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        steps_str, value_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])
    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple
        return rule_func
    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)))
    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))
    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))
    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init
    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch)
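# Usage sketch (added; `model` is a stand-in for any torch module):
#     import torch
#     optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
#     lr_scheduler = get_scheduler(
#         "cosine", optimizer, num_warmup_steps=500, num_training_steps=10_000
#     )
#     # then, inside the training loop: optimizer.step(); lr_scheduler.step()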
| 706 |
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin):
    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)
    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.Tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )
            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample
    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )
            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"
    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)
            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"
        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")
        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}.")
        return cls(controlnets)
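# Usage sketch (added; the checkpoint names are hypothetical): wrap several
# controlnets so their residuals are summed before conditioning the UNet:
#     controlnet = MultiControlNetModel(
#         [ControlNetModel.from_pretrained(p) for p in ("ctrl-canny", "ctrl-pose")]
#     )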
| 420 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ = {"""configuration_mmbt""": ["""MMBTConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 2 |
def base16_encode(data: bytes) -> str:
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])
def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid:
Data does not have an even number of hex digits.""")
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            """Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.""")
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
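# Round-trip example (added for illustration):
#     >>> base16_encode(b"Hello")
#     '48656C6C6F'
#     >>> base16_decode("48656C6C6F")
#     b'Hello'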
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2 | 1 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class _snake_case ( unittest.TestCase ):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_reduce_labels=False, ):
        size = size if size is not None else {'height': 20, 'width': 20}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    dataset = load_dataset('hf-internal-testing/fixtures_ade20k', split='test')

    image = Image.open(dataset[0]['file'])
    map = Image.open(dataset[1]['file'])

    return image, map
def prepare_semantic_batch_inputs():
    ds = load_dataset('hf-internal-testing/fixtures_ade20k', split='test')

    image1 = Image.open(ds[0]['file'])
    map1 = Image.open(ds[1]['file'])
    image2 = Image.open(ds[2]['file'])
    map2 = Image.open(ds[3]['file'])

    return [image1, image2], [map1, map2]
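# Sketch of how these fixtures are consumed by the tests below (hedged; the
# keyword values here are illustrative, not the tests' exact configuration):
#   image, seg_map = prepare_semantic_single_inputs()
#   processor = BeitImageProcessor(do_reduce_labels=True)
#   encoding = processor(image, seg_map, return_tensors="pt")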
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_center_crop'))
        self.assertTrue(hasattr(image_processing, 'center_crop'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 20, 'width': 20})
        self.assertEqual(image_processor.crop_size, {'height': 18, 'width': 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})
        self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84})
        self.assertEqual(image_processor.do_reduce_labels, True)
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
pass
    def test_call_pil(self):
# Initialize image_processing
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=a)
for image in image_inputs:
self.assertIsInstance(a , Image.Image)
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(a , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
    def test_call_numpy(self):
# Initialize image_processing
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a)
for image in image_inputs:
self.assertIsInstance(a , np.ndarray)
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(a , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
    def test_call_pytorch(self):
# Initialize image_processing
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a)
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor)
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(a , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
    def test_call_segmentation_maps(self):
# Initialize image_processing
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a)
SCREAMING_SNAKE_CASE = []
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor)
maps.append(torch.zeros(image.shape[-2:]).long())
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , maps[0] , return_tensors='pt')
self.assertEqual(
encoding['pixel_values'].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
1,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long)
self.assertTrue(encoding['labels'].min().item() >= 0)
self.assertTrue(encoding['labels'].max().item() <= 255)
# Test batched
SCREAMING_SNAKE_CASE = image_processing(a , a , return_tensors='pt')
self.assertEqual(
encoding['pixel_values'].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long)
self.assertTrue(encoding['labels'].min().item() >= 0)
self.assertTrue(encoding['labels'].max().item() <= 255)
# Test not batched input (PIL images)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = prepare_semantic_single_inputs()
SCREAMING_SNAKE_CASE = image_processing(a , a , return_tensors='pt')
self.assertEqual(
encoding['pixel_values'].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
1,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long)
self.assertTrue(encoding['labels'].min().item() >= 0)
self.assertTrue(encoding['labels'].max().item() <= 255)
# Test batched input (PIL images)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = prepare_semantic_batch_inputs()
SCREAMING_SNAKE_CASE = image_processing(a , a , return_tensors='pt')
self.assertEqual(
encoding['pixel_values'].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
2,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long)
self.assertTrue(encoding['labels'].min().item() >= 0)
self.assertTrue(encoding['labels'].max().item() <= 255)
    def test_reduce_labels(self):
# Initialize image_processing
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict)
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = prepare_semantic_single_inputs()
SCREAMING_SNAKE_CASE = image_processing(a , a , return_tensors='pt')
self.assertTrue(encoding['labels'].min().item() >= 0)
self.assertTrue(encoding['labels'].max().item() <= 150)
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = image_processing(a , a , return_tensors='pt')
self.assertTrue(encoding['labels'].min().item() >= 0)
self.assertTrue(encoding['labels'].max().item() <= 255)
| 721 |
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}

ZERO2 = 'zero2'
ZERO3 = 'zero3'
stages = [ZERO2, ZERO3]
def parameterized_custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name('_'.join(str(x) for x in param.args))
    return f'''{func.__name__}_{param_based_name}'''
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=False, fp16=False, )

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=True, fp16=False, )

    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=False, fp16=True, )

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=True, fp16=True, )
    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass

    def run_and_check(self, stage, model, eval_steps=10, distributed=True, fp16=True, quality_checks=True, ):
        model_name = models[model]

        output_dir = self.run_trainer(
            stage=stage, model_name=model_name, eval_steps=eval_steps, num_train_epochs=1, distributed=distributed, fp16=fp16, )

        self.do_checks(output_dir)

        return output_dir

    def run_trainer(self, stage, model_name, eval_steps=10, num_train_epochs=1, distributed=True, fp16=True, ):
        output_dir = self.get_auto_remove_tmp_dir('./xxx', after=False)
        args = f'''
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
'''.split()
        if fp16:
            args.extend(['--fp16'])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
        script = [f'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir
    def get_launcher(self, distributed=False):
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        # - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with more gpus because we use very little data)
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
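# For reference, the command assembled by run_trainer above has this shape
# (illustrative values; the exact flags come from the `args` template):
#   deepspeed --num_nodes 1 --num_gpus 2 <examples>/research_projects/wav2vec2/run_asr.py \
#       --model_name_or_path patrickvonplaten/wav2vec2_tiny_random ... --fp16 \
#       --deepspeed <test_dir>/ds_config_wav2vec2_zero2.json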
| 444 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size: int, length: int):
        scores = jnp.ones((batch_size, length)) / length
        return scores
def snake_case__ ( self : Tuple ) ->Tuple:
'''simple docstring'''
_UpperCamelCase : List[str] = None
_UpperCamelCase : Optional[int] = 20
_UpperCamelCase : Dict = self._get_uniform_logits(batch_size=2 , length=lowercase__ )
# tweak scores to not be uniform anymore
_UpperCamelCase : Union[str, Any] = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
_UpperCamelCase : str = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
_UpperCamelCase : Optional[int] = jax.nn.softmax(lowercase__ , axis=-1 )
_UpperCamelCase : str = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCamelCase : Any = FlaxTemperatureLogitsWarper(temperature=1.3 )
_UpperCamelCase : Optional[int] = jax.nn.softmax(temp_dist_warper_sharper(lowercase__ , scores.copy() , cur_len=lowercase__ ) , axis=-1 )
_UpperCamelCase : List[str] = jax.nn.softmax(temp_dist_warper_smoother(lowercase__ , scores.copy() , cur_len=lowercase__ ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def snake_case__ ( self : Dict ) ->Optional[int]:
'''simple docstring'''
_UpperCamelCase : Tuple = None
_UpperCamelCase : Any = 10
_UpperCamelCase : List[str] = 2
# create ramp distribution
_UpperCamelCase : str = np.broadcast_to(np.arange(lowercase__ )[None, :] , (batch_size, vocab_size) ).copy()
_UpperCamelCase : int = ramp_logits[1:, : vocab_size // 2] + vocab_size
_UpperCamelCase : int = FlaxTopKLogitsWarper(3 )
_UpperCamelCase : List[str] = top_k_warp(lowercase__ , lowercase__ , cur_len=lowercase__ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
_UpperCamelCase : List[str] = 5
_UpperCamelCase : Union[str, Any] = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
_UpperCamelCase : Dict = np.broadcast_to(np.arange(lowercase__ )[None, :] , (batch_size, length) ).copy()
_UpperCamelCase : int = top_k_warp_safety_check(lowercase__ , lowercase__ , cur_len=lowercase__ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def snake_case__ ( self : str ) ->Optional[int]:
'''simple docstring'''
_UpperCamelCase : List[Any] = None
_UpperCamelCase : int = 10
_UpperCamelCase : Optional[int] = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
_UpperCamelCase : Optional[int] = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
_UpperCamelCase : Dict = FlaxTopPLogitsWarper(0.8 )
_UpperCamelCase : str = np.exp(top_p_warp(lowercase__ , lowercase__ , cur_len=lowercase__ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
_UpperCamelCase : Optional[int] = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
self.assertTrue(np.allclose(lowercase__ , lowercase__ , atol=1e-3 ) )
# check edge cases with negative and extreme logits
_UpperCamelCase : Any = np.broadcast_to(np.arange(lowercase__ )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
_UpperCamelCase : List[Any] = ramp_logits[1] * 1_0_0.0
# make sure at least 2 tokens are kept
_UpperCamelCase : str = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
_UpperCamelCase : Dict = top_p_warp(lowercase__ , lowercase__ , cur_len=lowercase__ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def snake_case__ ( self : List[Any] ) ->Tuple:
'''simple docstring'''
_UpperCamelCase : List[Any] = 20
_UpperCamelCase : List[str] = 4
_UpperCamelCase : str = 0
_UpperCamelCase : Tuple = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowercase__ )
# check that min length is applied at length 5
_UpperCamelCase : Optional[int] = ids_tensor((batch_size, 20) , vocab_size=20 )
_UpperCamelCase : Any = 5
_UpperCamelCase : int = self._get_uniform_logits(lowercase__ , lowercase__ )
_UpperCamelCase : Any = min_dist_processor(lowercase__ , lowercase__ , cur_len=lowercase__ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("inf" )] )
# check that min length is not applied anymore at length 15
_UpperCamelCase : Any = self._get_uniform_logits(lowercase__ , lowercase__ )
_UpperCamelCase : List[Any] = 15
_UpperCamelCase : Dict = min_dist_processor(lowercase__ , lowercase__ , cur_len=lowercase__ )
self.assertFalse(jnp.isinf(lowercase__ ).any() )
def snake_case__ ( self : Optional[int] ) ->int:
'''simple docstring'''
_UpperCamelCase : Optional[Any] = 20
_UpperCamelCase : int = 4
_UpperCamelCase : Any = 0
_UpperCamelCase : Tuple = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowercase__ )
# check that all scores are -inf except the bos_token_id score
_UpperCamelCase : Optional[Any] = ids_tensor((batch_size, 1) , vocab_size=20 )
_UpperCamelCase : Optional[int] = 1
_UpperCamelCase : Any = self._get_uniform_logits(lowercase__ , lowercase__ )
_UpperCamelCase : Optional[Any] = logits_processor(lowercase__ , lowercase__ , cur_len=lowercase__ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
_UpperCamelCase : Optional[Any] = 3
_UpperCamelCase : Any = self._get_uniform_logits(lowercase__ , lowercase__ )
_UpperCamelCase : List[str] = logits_processor(lowercase__ , lowercase__ , cur_len=lowercase__ )
self.assertFalse(jnp.isinf(lowercase__ ).any() )
def snake_case__ ( self : Tuple ) ->Union[str, Any]:
'''simple docstring'''
_UpperCamelCase : str = 20
_UpperCamelCase : str = 4
_UpperCamelCase : List[str] = 0
_UpperCamelCase : Dict = 5
_UpperCamelCase : Dict = FlaxForcedEOSTokenLogitsProcessor(max_length=lowercase__ , eos_token_id=lowercase__ )
# check that all scores are -inf except the eos_token_id when max_length is reached
_UpperCamelCase : Tuple = ids_tensor((batch_size, 4) , vocab_size=20 )
_UpperCamelCase : Optional[Any] = 4
_UpperCamelCase : Tuple = self._get_uniform_logits(lowercase__ , lowercase__ )
_UpperCamelCase : str = logits_processor(lowercase__ , lowercase__ , cur_len=lowercase__ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
_UpperCamelCase : int = 3
_UpperCamelCase : Dict = self._get_uniform_logits(lowercase__ , lowercase__ )
_UpperCamelCase : Dict = logits_processor(lowercase__ , lowercase__ , cur_len=lowercase__ )
self.assertFalse(jnp.isinf(lowercase__ ).any() )
def snake_case__ ( self : Optional[int] ) ->Dict:
'''simple docstring'''
_UpperCamelCase : List[Any] = 4
_UpperCamelCase : List[str] = 10
_UpperCamelCase : Tuple = 15
_UpperCamelCase : int = 2
_UpperCamelCase : Optional[int] = 1
_UpperCamelCase : Any = 15
# dummy input_ids and scores
_UpperCamelCase : int = ids_tensor((batch_size, sequence_length) , lowercase__ )
_UpperCamelCase : int = input_ids.copy()
_UpperCamelCase : List[str] = self._get_uniform_logits(lowercase__ , lowercase__ )
_UpperCamelCase : Any = scores.copy()
# instantiate all dist processors
_UpperCamelCase : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCamelCase : Union[str, Any] = FlaxTopKLogitsWarper(3 )
_UpperCamelCase : Any = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_UpperCamelCase : Dict = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowercase__ )
_UpperCamelCase : List[str] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowercase__ )
_UpperCamelCase : Dict = FlaxForcedEOSTokenLogitsProcessor(max_length=lowercase__ , eos_token_id=lowercase__ )
_UpperCamelCase : Optional[int] = 10
# no processor list
_UpperCamelCase : Optional[Any] = temp_dist_warp(lowercase__ , lowercase__ , cur_len=lowercase__ )
_UpperCamelCase : Optional[Any] = top_k_warp(lowercase__ , lowercase__ , cur_len=lowercase__ )
_UpperCamelCase : Any = top_p_warp(lowercase__ , lowercase__ , cur_len=lowercase__ )
_UpperCamelCase : Union[str, Any] = min_dist_proc(lowercase__ , lowercase__ , cur_len=lowercase__ )
_UpperCamelCase : str = bos_dist_proc(lowercase__ , lowercase__ , cur_len=lowercase__ )
_UpperCamelCase : List[str] = eos_dist_proc(lowercase__ , lowercase__ , cur_len=lowercase__ )
# with processor list
_UpperCamelCase : List[str] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_UpperCamelCase : List[Any] = processor(lowercase__ , lowercase__ , cur_len=lowercase__ )
# scores should be equal
self.assertTrue(jnp.allclose(lowercase__ , lowercase__ , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def snake_case__ ( self : Optional[int] ) ->Optional[int]:
'''simple docstring'''
_UpperCamelCase : Any = 4
_UpperCamelCase : List[str] = 10
_UpperCamelCase : List[Any] = 15
_UpperCamelCase : str = 2
_UpperCamelCase : Union[str, Any] = 1
_UpperCamelCase : Union[str, Any] = 15
# dummy input_ids and scores
_UpperCamelCase : Any = ids_tensor((batch_size, sequence_length) , lowercase__ )
_UpperCamelCase : List[Any] = input_ids.copy()
_UpperCamelCase : str = self._get_uniform_logits(lowercase__ , lowercase__ )
_UpperCamelCase : Optional[Any] = scores.copy()
# instantiate all dist processors
_UpperCamelCase : Tuple = FlaxTemperatureLogitsWarper(temperature=0.5 )
_UpperCamelCase : int = FlaxTopKLogitsWarper(3 )
_UpperCamelCase : Tuple = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_UpperCamelCase : Union[str, Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowercase__ )
_UpperCamelCase : Any = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowercase__ )
_UpperCamelCase : Optional[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=lowercase__ , eos_token_id=lowercase__ )
_UpperCamelCase : List[str] = 10
# no processor list
def run_no_processor_list(lowercase__ : Optional[int] , lowercase__ : Dict , lowercase__ : Optional[int] ):
_UpperCamelCase : List[str] = temp_dist_warp(lowercase__ , lowercase__ , cur_len=lowercase__ )
_UpperCamelCase : List[Any] = top_k_warp(lowercase__ , lowercase__ , cur_len=lowercase__ )
_UpperCamelCase : str = top_p_warp(lowercase__ , lowercase__ , cur_len=lowercase__ )
_UpperCamelCase : List[Any] = min_dist_proc(lowercase__ , lowercase__ , cur_len=lowercase__ )
_UpperCamelCase : int = bos_dist_proc(lowercase__ , lowercase__ , cur_len=lowercase__ )
_UpperCamelCase : str = eos_dist_proc(lowercase__ , lowercase__ , cur_len=lowercase__ )
return scores
# with processor list
def run_processor_list(lowercase__ : Dict , lowercase__ : str , lowercase__ : Union[str, Any] ):
_UpperCamelCase : List[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_UpperCamelCase : int = processor(lowercase__ , lowercase__ , cur_len=lowercase__ )
return scores
_UpperCamelCase : Dict = jax.jit(lowercase__ )
_UpperCamelCase : Optional[Any] = jax.jit(lowercase__ )
_UpperCamelCase : Tuple = jitted_run_no_processor_list(lowercase__ , lowercase__ , lowercase__ )
_UpperCamelCase : Any = jitted_run_processor_list(lowercase__ , lowercase__ , lowercase__ )
# scores should be equal
self.assertTrue(jnp.allclose(lowercase__ , lowercase__ , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 435 | '''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = '''sample'''
    base_precision = 1e-2
    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}
    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
def snake_case__ ( self : int ) ->int:
'''simple docstring'''
pass
def snake_case__ ( self : Dict ) ->Any:
'''simple docstring'''
pass
@unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" )
def snake_case__ ( self : Dict ) ->List[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase : Union[str, Any] = self.prepare_init_args_and_inputs_for_common()
_UpperCamelCase : Optional[int] = self.model_class(**lowercase__ )
model.to(lowercase__ )
assert not model.is_gradient_checkpointing and model.training
_UpperCamelCase : List[Any] = model(**lowercase__ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
_UpperCamelCase : Dict = torch.randn_like(lowercase__ )
_UpperCamelCase : Dict = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
_UpperCamelCase : List[str] = self.model_class(**lowercase__ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(lowercase__ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
_UpperCamelCase : int = model_a(**lowercase__ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
_UpperCamelCase : Union[str, Any] = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
_UpperCamelCase : List[Any] = dict(model.named_parameters() )
_UpperCamelCase : Tuple = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) )
    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"
def snake_case__ ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
_UpperCamelCase : int = model.to(lowercase__ )
model.eval()
if torch_device == "mps":
_UpperCamelCase : Dict = torch.manual_seed(0 )
else:
_UpperCamelCase : Union[str, Any] = torch.Generator(device=lowercase__ ).manual_seed(0 )
_UpperCamelCase : Dict = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
_UpperCamelCase : str = image.to(lowercase__ )
with torch.no_grad():
_UpperCamelCase : List[str] = model(lowercase__ , sample_posterior=lowercase__ , generator=lowercase__ ).sample
_UpperCamelCase : List[Any] = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
_UpperCamelCase : Dict = torch.tensor(
[
-4.0078e-01,
-3.8323e-04,
-1.2681e-01,
-1.1462e-01,
2.0095e-01,
1.0893e-01,
-8.8247e-02,
-3.0361e-01,
-9.8644e-03,
] )
elif torch_device == "cpu":
_UpperCamelCase : Dict = torch.tensor(
[-0.1_3_5_2, 0.0_8_7_8, 0.0_4_1_9, -0.0_8_1_8, -0.1_0_6_9, 0.0_6_8_8, -0.1_4_5_8, -0.4_4_4_6, -0.0_0_2_6] )
else:
_UpperCamelCase : Dict = torch.tensor(
[-0.2_4_2_1, 0.4_6_4_2, 0.2_5_0_7, -0.0_4_3_8, 0.0_6_8_2, 0.3_1_6_0, -0.2_0_1_8, -0.0_7_2_7, 0.2_4_8_5] )
self.assertTrue(torch_all_close(lowercase__ , lowercase__ , rtol=1e-2 ) )
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f'''gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy'''
    def tearDown(self):
        super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image
    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id, subfolder="vae", torch_dtype=torch_dtype, revision=revision, )
        model.to(torch_device).eval()

        return model
    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
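    # Shape reference for the decode tests below (hedged note; sizes are the ones
    # these tests use): latents of (B, 4, 64, 64) decode to samples of (B, 3, 512, 512),
    # i.e. the SD VAE upsamples by a fixed factor of 8 per spatial dimension.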
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def snake_case__ ( self : Tuple , lowercase__ : str , lowercase__ : Dict , lowercase__ : Any ) ->Tuple:
'''simple docstring'''
_UpperCamelCase : Any = self.get_sd_vae_model()
_UpperCamelCase : Union[str, Any] = self.get_sd_image(lowercase__ )
_UpperCamelCase : Optional[int] = self.get_generator(lowercase__ )
with torch.no_grad():
_UpperCamelCase : List[str] = model(lowercase__ , generator=lowercase__ , sample_posterior=lowercase__ ).sample
assert sample.shape == image.shape
_UpperCamelCase : Optional[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
_UpperCamelCase : Union[str, Any] = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(lowercase__ , lowercase__ , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]],
[47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]],
# fmt: on
] )
@require_torch_gpu
def snake_case__ ( self : List[Any] , lowercase__ : int , lowercase__ : Tuple ) ->Dict:
'''simple docstring'''
_UpperCamelCase : Dict = self.get_sd_vae_model(fpaa=lowercase__ )
_UpperCamelCase : Tuple = self.get_sd_image(lowercase__ , fpaa=lowercase__ )
_UpperCamelCase : int = self.get_generator(lowercase__ )
with torch.no_grad():
_UpperCamelCase : Union[str, Any] = model(lowercase__ , generator=lowercase__ , sample_posterior=lowercase__ ).sample
assert sample.shape == image.shape
_UpperCamelCase : List[str] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
_UpperCamelCase : int = torch.tensor(lowercase__ )
assert torch_all_close(lowercase__ , lowercase__ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def snake_case__ ( self : int , lowercase__ : int , lowercase__ : Any , lowercase__ : Optional[int] ) ->Any:
'''simple docstring'''
_UpperCamelCase : Tuple = self.get_sd_vae_model()
_UpperCamelCase : str = self.get_sd_image(lowercase__ )
with torch.no_grad():
_UpperCamelCase : List[Any] = model(lowercase__ ).sample
assert sample.shape == image.shape
_UpperCamelCase : int = sample[-1, -2:, -2:, :2].flatten().float().cpu()
_UpperCamelCase : Optional[Any] = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(lowercase__ , lowercase__ , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]],
[37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]],
# fmt: on
] )
@require_torch_gpu
def snake_case__ ( self : Optional[int] , lowercase__ : Tuple , lowercase__ : Dict ) ->List[str]:
'''simple docstring'''
_UpperCamelCase : Tuple = self.get_sd_vae_model()
_UpperCamelCase : Any = self.get_sd_image(lowercase__ , shape=(3, 4, 64, 64) )
with torch.no_grad():
_UpperCamelCase : Dict = model.decode(lowercase__ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
_UpperCamelCase : str = sample[-1, -2:, :2, -2:].flatten().cpu()
_UpperCamelCase : Any = torch.tensor(lowercase__ )
assert torch_all_close(lowercase__ , lowercase__ , atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]],
[16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]],
# fmt: on
] )
@require_torch_gpu
def snake_case__ ( self : Union[str, Any] , lowercase__ : List[str] , lowercase__ : str ) ->Optional[int]:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = self.get_sd_vae_model(fpaa=lowercase__ )
_UpperCamelCase : Tuple = self.get_sd_image(lowercase__ , shape=(3, 4, 64, 64) , fpaa=lowercase__ )
with torch.no_grad():
_UpperCamelCase : int = model.decode(lowercase__ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
_UpperCamelCase : str = sample[-1, -2:, :2, -2:].flatten().float().cpu()
_UpperCamelCase : Dict = torch.tensor(lowercase__ )
assert torch_all_close(lowercase__ , lowercase__ , atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def snake_case__ ( self : str , lowercase__ : str ) ->List[Any]:
'''simple docstring'''
_UpperCamelCase : str = self.get_sd_vae_model(fpaa=lowercase__ )
_UpperCamelCase : List[str] = self.get_sd_image(lowercase__ , shape=(3, 4, 64, 64) , fpaa=lowercase__ )
with torch.no_grad():
_UpperCamelCase : int = model.decode(lowercase__ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
_UpperCamelCase : Tuple = model.decode(lowercase__ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowercase__ , lowercase__ , atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def snake_case__ ( self : Optional[int] , lowercase__ : int ) ->str:
'''simple docstring'''
_UpperCamelCase : Optional[int] = self.get_sd_vae_model()
_UpperCamelCase : Any = self.get_sd_image(lowercase__ , shape=(3, 4, 64, 64) )
with torch.no_grad():
_UpperCamelCase : List[Any] = model.decode(lowercase__ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
_UpperCamelCase : Optional[int] = model.decode(lowercase__ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowercase__ , lowercase__ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]],
[47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]],
# fmt: on
] )
def snake_case__ ( self : Union[str, Any] , lowercase__ : List[str] , lowercase__ : Dict ) ->Optional[Any]:
'''simple docstring'''
_UpperCamelCase : Optional[Any] = self.get_sd_vae_model()
_UpperCamelCase : Any = self.get_sd_image(lowercase__ )
_UpperCamelCase : Optional[int] = self.get_generator(lowercase__ )
with torch.no_grad():
_UpperCamelCase : Union[str, Any] = model.encode(lowercase__ ).latent_dist
_UpperCamelCase : Dict = dist.sample(generator=lowercase__ )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
_UpperCamelCase : List[Any] = sample[0, -1, -3:, -3:].flatten().cpu()
_UpperCamelCase : int = torch.tensor(lowercase__ )
_UpperCamelCase : Union[str, Any] = 3e-3 if torch_device != "mps" else 1e-2
assert torch_all_close(lowercase__ , lowercase__ , atol=lowercase__ )
| 435 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
    '''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_ernie'''] = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 77 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element
def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator
def create_dataloader(accelerator, dataset_size, batch_size, iterable=False):
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))

    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)

    return dl
def verify_dataloader_batch_sizes(accelerator, dataset_size, batch_size, process_0_expected_batch_sizes, process_1_expected_batch_sizes, ):
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)

    batch_sizes = [len(batch[0]) for batch in dl]

    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes
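# Worked example of the behaviour verified below (hedged; mirrors the two calls in
# the first test): 3 items sharded over 2 processes with batch_size=1 give
#   even_batches=True : process 0 -> [1, 1], process 1 -> [1, 1]  (padding duplicates a sample)
#   even_batches=False: process 0 -> [1, 1], process 1 -> [1]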
def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()

    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1, 1], )

    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 2], )
def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)

    verify_dataloader_batch_sizes(
        accelerator, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1], )

    verify_dataloader_batch_sizes(
        accelerator, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 1], )
def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)

    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)

    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)

    accelerator.wait_for_everyone()

    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]
def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)
def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches

    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches
def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with warnings.catch_warnings():
        warnings.filterwarnings('ignore')
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError

    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches
def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)

    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)
def main():
    accelerator = create_accelerator()
accelerator.print('Test that even_batches variable ensures uniform batches across processes' )
test_default_ensures_even_batch_sizes()
accelerator.print('Run tests with even_batches disabled' )
test_can_disable_even_batches()
accelerator.print('Test joining uneven inputs' )
test_can_join_uneven_inputs()
accelerator.print('Test overriding even_batches when joining uneven inputs' )
test_join_can_override_even_batches()
accelerator.print('Test overriding even_batches for mixed dataloader types' )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print('Test overriding even_batches raises a warning for iterable dataloaders' )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print('Test join with non DDP distributed raises warning' )
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state
if __name__ == "__main__":
main()
| 77 | 1 |
'''simple docstring'''
import requests
_NEWS_API = 'https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey='
def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page['''articles'''], 1):
print(f'{i}.) {article["title"]}' )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key='<Your BBC News API key goes here>')
| 22 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_0_0_0))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    # Score is the number of positions whose gene matches the target's gene.
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    # Slice both parents at a single random point and swap the suffixes.
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child: str, genes: list[str]) -> str:
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        # Replace one randomly chosen gene with a random gene from the list.
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str], ) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]

        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
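# Minimal illustration of the primitives above (a hedged demo; the helper name is
# new, the calls are to the functions defined in this file):
def _ga_primitives_demo() -> None:
    # evaluate() counts positionwise matches against the target string.
    assert evaluate("aa", "ab") == ("aa", 1.0)
    # crossover() splices two parents at one random point, so the children
    # partition the parents and keep their length.
    child_1, child_2 = crossover("aaaa", "bbbb")
    assert len(child_1) == len(child_2) == 4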
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    if N_POPULATION < N_SELECTED:
        msg = f'''{N_POPULATION} must be bigger than {N_SELECTED}'''
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(SCREAMING_SNAKE_CASE__ )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f'''\nGeneration: {generation}'''
f'''\nTotal Population:{total_population}'''
f'''\nBest score: {population_score[0][1]}'''
f'''\nBest string: {population_score[0][0]}''' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
        if len(population) > N_POPULATION:
break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
)
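

# For reference, `evaluate` (defined earlier in this file, not shown here) is
# assumed to score a candidate by counting positions that already match the
# target; a minimal sketch of that contract:
def _evaluate_sketch(item: str, main_target: str) -> tuple[str, float]:
    score = sum(1 for position, g in enumerate(item) if g == main_target[position])
    return (item, float(score))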
| 663 | 0 |
encode_dict = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
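

# Round-trip sanity check for the encode/decode pair above:
def _demo_baconian_roundtrip() -> None:
    message = "hello world"
    assert decode(encode(message)) == message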
| 345 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def __lowercase( __snake_case : Tuple ) -> str:
__snake_case = SwinvaConfig()
__snake_case = swinva_name.split('_' )
__snake_case = name_split[1]
if "to" in name_split[3]:
__snake_case = int(name_split[3][-3:] )
else:
__snake_case = int(name_split[3] )
if "to" in name_split[2]:
__snake_case = int(name_split[2][-2:] )
else:
__snake_case = int(name_split[2][6:] )
if model_size == "tiny":
__snake_case = 96
__snake_case = (2, 2, 6, 2)
__snake_case = (3, 6, 12, 24)
elif model_size == "small":
__snake_case = 96
__snake_case = (2, 2, 18, 2)
__snake_case = (3, 6, 12, 24)
elif model_size == "base":
__snake_case = 1_28
__snake_case = (2, 2, 18, 2)
__snake_case = (4, 8, 16, 32)
else:
__snake_case = 1_92
__snake_case = (2, 2, 18, 2)
__snake_case = (6, 12, 24, 48)
if "to" in swinva_name:
__snake_case = (12, 12, 12, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
__snake_case = 2_18_41
__snake_case = 'huggingface/label-files'
__snake_case = 'imagenet-22k-id2label.json'
__snake_case = json.load(open(hf_hub_download(__snake_case ,__snake_case ,repo_type='dataset' ) ,'r' ) )
__snake_case = {int(__snake_case ): v for k, v in idalabel.items()}
__snake_case = idalabel
__snake_case = {v: k for k, v in idalabel.items()}
else:
__snake_case = 10_00
__snake_case = 'huggingface/label-files'
__snake_case = 'imagenet-1k-id2label.json'
__snake_case = json.load(open(hf_hub_download(__snake_case ,__snake_case ,repo_type='dataset' ) ,'r' ) )
__snake_case = {int(__snake_case ): v for k, v in idalabel.items()}
__snake_case = idalabel
__snake_case = {v: k for k, v in idalabel.items()}
__snake_case = img_size
__snake_case = num_classes
__snake_case = embed_dim
__snake_case = depths
__snake_case = num_heads
__snake_case = window_size
return config
def __lowercase( __snake_case : List[str] ) -> Any:
if "patch_embed.proj" in name:
__snake_case = name.replace('patch_embed.proj' ,'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__snake_case = name.replace('patch_embed.norm' ,'embeddings.norm' )
if "layers" in name:
__snake_case = 'encoder.' + name
if "attn.proj" in name:
__snake_case = name.replace('attn.proj' ,'attention.output.dense' )
if "attn" in name:
__snake_case = name.replace('attn' ,'attention.self' )
if "norm1" in name:
__snake_case = name.replace('norm1' ,'layernorm_before' )
if "norm2" in name:
__snake_case = name.replace('norm2' ,'layernorm_after' )
if "mlp.fc1" in name:
__snake_case = name.replace('mlp.fc1' ,'intermediate.dense' )
if "mlp.fc2" in name:
__snake_case = name.replace('mlp.fc2' ,'output.dense' )
if "q_bias" in name:
__snake_case = name.replace('q_bias' ,'query.bias' )
if "k_bias" in name:
__snake_case = name.replace('k_bias' ,'key.bias' )
if "v_bias" in name:
__snake_case = name.replace('v_bias' ,'value.bias' )
if "cpb_mlp" in name:
__snake_case = name.replace('cpb_mlp' ,'continuous_position_bias_mlp' )
if name == "norm.weight":
__snake_case = 'layernorm.weight'
if name == "norm.bias":
__snake_case = 'layernorm.bias'
if "head" in name:
__snake_case = name.replace('head' ,'classifier' )
else:
__snake_case = 'swinv2.' + name
return name
def __lowercase( __snake_case : str ,__snake_case : Optional[int] ) -> str:
for key in orig_state_dict.copy().keys():
__snake_case = orig_state_dict.pop(__snake_case )
if "mask" in key:
continue
elif "qkv" in key:
__snake_case = key.split('.' )
__snake_case = int(key_split[1] )
__snake_case = int(key_split[3] )
__snake_case = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__snake_case = val[:dim, :]
__snake_case = val[dim : dim * 2, :]
__snake_case = val[-dim:, :]
else:
__snake_case = val[:dim]
__snake_case = val[
dim : dim * 2
]
__snake_case = val[-dim:]
else:
__snake_case = val
return orig_state_dict
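

# The qkv split above assumes the fused projection is stacked as
# [query; key; value] along dim 0; a tiny numerical check of that slicing,
# with `dim` standing in for the per-projection width (illustrative only):
def _check_qkv_split(dim: int = 4) -> None:
    fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
    assert torch.equal(torch.cat([q, k, v], dim=0), fused)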
def __lowercase( __snake_case : Tuple ,__snake_case : Any ) -> Tuple:
__snake_case = timm.create_model(__snake_case ,pretrained=__snake_case )
timm_model.eval()
__snake_case = get_swinva_config(__snake_case )
__snake_case = SwinvaForImageClassification(__snake_case )
model.eval()
__snake_case = convert_state_dict(timm_model.state_dict() ,__snake_case )
model.load_state_dict(__snake_case )
__snake_case = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__snake_case = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swinva_name.replace('_' ,'-' ) ) )
__snake_case = Image.open(requests.get(__snake_case ,stream=__snake_case ).raw )
__snake_case = image_processor(images=__snake_case ,return_tensors='pt' )
__snake_case = timm_model(inputs['pixel_values'] )
__snake_case = model(**__snake_case ).logits
assert torch.allclose(__snake_case ,__snake_case ,atol=1e-3 )
print(f'''Saving model {swinva_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__snake_case )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__snake_case )
model.push_to_hub(
repo_path_or_name=Path(__snake_case ,__snake_case ) ,organization='nandwalritik' ,commit_message='Add model' ,)
if __name__ == "__main__":
lowerCamelCase_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swinv2_name",
default="swinv2_tiny_patch4_window8_256",
type=str,
help="Name of the Swinv2 timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
lowerCamelCase_ : Tuple = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
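    # Example invocation (script and output paths are hypothetical):
    #   python convert_swinv2_checkpoint.py \
    #       --swinv2_name swinv2_tiny_patch4_window8_256 \
    #       --pytorch_dump_folder_path ./swinv2-tiny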
| 345 | 1 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b


def get_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["sentence1", "sentence2", "label"]
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)
return train_dataloader, eval_dataloader
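

# A quick way to exercise the pieces above (batch size is arbitrary): build the
# synthetic regression dataset and pull one mini-batch through a DataLoader.
def _demo_regression_batch() -> None:
    dataset = RegressionDataset(a=2, b=3, length=64)
    loader = DataLoader(dataset, batch_size=16, shuffle=True)
    batch = next(iter(loader))
    print(batch["x"].shape, batch["y"].shape)  # torch.Size([16]) torch.Size([16])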
| 599 | """simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
lowerCamelCase__ : Optional[Any] = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = TransformeraDModel(
            sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2, in_channels=4, out_channels=8, attention_bias=True, activation_fn="gelu-approximate", num_embeds_ada_norm=1000, norm_type="ada_norm_zero", norm_elementwise_affine=False, )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_dit_256(self):
        generator = torch.manual_seed(0)
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")
        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)
        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1e-2
    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")
        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)
        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images
        for word, image in zip(words, images):
            expected_image = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
f"""/dit/{word}_512.npy""" )
assert np.abs((expected_image - image).max() ) < 1e-1
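

# Minimal class-conditional sampling sketch mirroring the slow test above
# (uses the public facebook/DiT-XL-2-256 checkpoint; requires a CUDA device):
def _demo_dit_sampling() -> None:
    pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
    pipe.to("cuda")
    class_ids = pipe.get_label_ids(["white shark"])
    images = pipe(class_ids, num_inference_steps=25, output_type="np").images
    print(images[0].shape)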
| 599 | 1 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
lowercase_ = datasets.utils.logging.get_logger(__name__)
lowercase_ = ["names", "prefix"]
lowercase_ = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
lowercase_ = ["encoding_errors", "on_bad_lines"]
lowercase_ = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = "\""
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
        # Some kwargs must not be passed if they don't have a default value;
        # others are deprecated, so we also skip them when they equal the default.
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
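

# End to end, this builder backs `load_dataset("csv", ...)`; a minimal usage
# sketch (the file path is hypothetical):
def _demo_csv_builder(path: str = "data/train.csv") -> None:
    dset = datasets.load_dataset("csv", data_files={"train": path}, sep=",")
    print(dset["train"].features)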
| 704 | from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowercase_ = logging.get_logger(__name__)
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
if isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(__SCREAMING_SNAKE_CASE ):
return [[videos]]
raise ValueError(F'''Could not make batched video from {videos}''' )
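

# The helper above normalizes the three accepted layouts into list-of-videos
# (each video a list of frames); a small illustration of the contract:
def _demo_make_batched() -> None:
    frame = np.zeros((8, 8, 3), dtype=np.uint8)  # one valid image
    assert __lowerCAmelCase(frame)[0][0] is frame      # image -> [[image]]
    assert len(__lowerCAmelCase([frame, frame])) == 1  # one video -> wrapped once
    assert __lowerCAmelCase([[frame]])[0][0] is frame  # already batched -> unchanged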
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : Any = ["pixel_values"]
def __init__( self : Optional[int] , _lowerCAmelCase : bool = True , _lowerCAmelCase : Dict[str, int] = None , _lowerCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , _lowerCAmelCase : bool = True , _lowerCAmelCase : Dict[str, int] = None , _lowerCAmelCase : bool = True , _lowerCAmelCase : Union[int, float] = 1 / 2_55 , _lowerCAmelCase : bool = True , _lowerCAmelCase : Optional[Union[float, List[float]]] = None , _lowerCAmelCase : Optional[Union[float, List[float]]] = None , **_lowerCAmelCase : Union[str, Any] , ):
super().__init__(**_lowerCAmelCase )
__snake_case : Tuple = size if size is not None else {"""shortest_edge""": 2_24}
__snake_case : List[Any] = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
__snake_case : int = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
__snake_case : List[Any] = get_size_dict(_lowerCAmelCase , param_name="""crop_size""" )
__snake_case : Union[str, Any] = do_resize
__snake_case : Optional[Any] = size
__snake_case : int = do_center_crop
__snake_case : Dict = crop_size
__snake_case : Dict = resample
__snake_case : Tuple = do_rescale
__snake_case : Optional[int] = rescale_factor
__snake_case : str = do_normalize
__snake_case : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__snake_case : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def snake_case__ ( self : Optional[int] , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : Dict[str, int] , _lowerCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , _lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase : Optional[Any] , ):
__snake_case : List[str] = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
if "shortest_edge" in size:
__snake_case : Tuple = get_resize_output_image_size(_lowerCAmelCase , size["""shortest_edge"""] , default_to_square=_lowerCAmelCase )
elif "height" in size and "width" in size:
__snake_case : List[Any] = (size["""height"""], size["""width"""])
else:
raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def snake_case__ ( self : str , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : Dict[str, int] , _lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase : Optional[Any] , ):
__snake_case : List[str] = get_size_dict(_lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(_lowerCAmelCase , size=(size["""height"""], size["""width"""]) , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def snake_case__ ( self : Tuple , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : Union[int, float] , _lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase : Optional[Any] , ):
return rescale(_lowerCAmelCase , scale=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def snake_case__ ( self : str , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : Union[float, List[float]] , _lowerCAmelCase : Union[float, List[float]] , _lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase : Dict , ):
return normalize(_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def snake_case__ ( self : Tuple , _lowerCAmelCase : ImageInput , _lowerCAmelCase : bool = None , _lowerCAmelCase : Dict[str, int] = None , _lowerCAmelCase : PILImageResampling = None , _lowerCAmelCase : bool = None , _lowerCAmelCase : Dict[str, int] = None , _lowerCAmelCase : bool = None , _lowerCAmelCase : float = None , _lowerCAmelCase : bool = None , _lowerCAmelCase : Optional[Union[float, List[float]]] = None , _lowerCAmelCase : Optional[Union[float, List[float]]] = None , _lowerCAmelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , ):
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
__snake_case : Tuple = to_numpy_array(_lowerCAmelCase )
if do_resize:
__snake_case : List[Any] = self.resize(image=_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase )
if do_center_crop:
__snake_case : Dict = self.center_crop(_lowerCAmelCase , size=_lowerCAmelCase )
if do_rescale:
__snake_case : int = self.rescale(image=_lowerCAmelCase , scale=_lowerCAmelCase )
if do_normalize:
__snake_case : List[Any] = self.normalize(image=_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase )
__snake_case : Optional[Any] = to_channel_dimension_format(_lowerCAmelCase , _lowerCAmelCase )
return image
def snake_case__ ( self : List[str] , _lowerCAmelCase : ImageInput , _lowerCAmelCase : bool = None , _lowerCAmelCase : Dict[str, int] = None , _lowerCAmelCase : PILImageResampling = None , _lowerCAmelCase : bool = None , _lowerCAmelCase : Dict[str, int] = None , _lowerCAmelCase : bool = None , _lowerCAmelCase : float = None , _lowerCAmelCase : bool = None , _lowerCAmelCase : Optional[Union[float, List[float]]] = None , _lowerCAmelCase : Optional[Union[float, List[float]]] = None , _lowerCAmelCase : Optional[Union[str, TensorType]] = None , _lowerCAmelCase : ChannelDimension = ChannelDimension.FIRST , **_lowerCAmelCase : Union[str, Any] , ):
__snake_case : Optional[int] = do_resize if do_resize is not None else self.do_resize
__snake_case : Any = resample if resample is not None else self.resample
__snake_case : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
__snake_case : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
__snake_case : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
__snake_case : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
__snake_case : List[str] = image_mean if image_mean is not None else self.image_mean
__snake_case : List[str] = image_std if image_std is not None else self.image_std
__snake_case : Optional[Any] = size if size is not None else self.size
__snake_case : int = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
__snake_case : Any = crop_size if crop_size is not None else self.crop_size
__snake_case : List[Any] = get_size_dict(_lowerCAmelCase , param_name="""crop_size""" )
if not valid_images(_lowerCAmelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
__snake_case : Optional[Any] = make_batched(_lowerCAmelCase )
__snake_case : int = [
[
self._preprocess_image(
image=_lowerCAmelCase , do_resize=_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase , do_center_crop=_lowerCAmelCase , crop_size=_lowerCAmelCase , do_rescale=_lowerCAmelCase , rescale_factor=_lowerCAmelCase , do_normalize=_lowerCAmelCase , image_mean=_lowerCAmelCase , image_std=_lowerCAmelCase , data_format=_lowerCAmelCase , )
for img in video
]
for video in videos
]
__snake_case : Optional[int] = {"""pixel_values""": videos}
return BatchFeature(data=_lowerCAmelCase , tensor_type=_lowerCAmelCase )
| 390 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class lowerCAmelCase ( A ):
lowerCAmelCase_ = "informer"
lowerCAmelCase_ = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self : List[Any] , __lowercase : Optional[int] = None , __lowercase : Optional[int] = None , __lowercase : str = "student_t" , __lowercase : str = "nll" , __lowercase : int = 1 , __lowercase : List[int] = None , __lowercase : Optional[Union[str, bool]] = "mean" , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : Optional[List[int]] = None , __lowercase : Optional[List[int]] = None , __lowercase : int = 64 , __lowercase : int = 32 , __lowercase : int = 32 , __lowercase : int = 2 , __lowercase : int = 2 , __lowercase : int = 2 , __lowercase : int = 2 , __lowercase : bool = True , __lowercase : str = "gelu" , __lowercase : float = 0.0_5 , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : float = 0.1 , __lowercase : int = 100 , __lowercase : float = 0.0_2 , __lowercase : str=True , __lowercase : str = "prob" , __lowercase : int = 5 , __lowercase : bool = True , **__lowercase : str , ):
"""simple docstring"""
__lowercase =prediction_length
__lowercase =context_length or prediction_length
__lowercase =distribution_output
__lowercase =loss
__lowercase =input_size
__lowercase =num_time_features
__lowercase =lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
__lowercase =scaling
__lowercase =num_dynamic_real_features
__lowercase =num_static_real_features
__lowercase =num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(__lowercase ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
__lowercase =cardinality
else:
__lowercase =[0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(__lowercase ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
__lowercase =embedding_dimension
else:
__lowercase =[min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
__lowercase =num_parallel_samples
# Transformer architecture configuration
__lowercase =input_size * len(self.lags_sequence ) + self._number_of_features
__lowercase =d_model
__lowercase =encoder_attention_heads
__lowercase =decoder_attention_heads
__lowercase =encoder_ffn_dim
__lowercase =decoder_ffn_dim
__lowercase =encoder_layers
__lowercase =decoder_layers
__lowercase =dropout
__lowercase =attention_dropout
__lowercase =activation_dropout
__lowercase =encoder_layerdrop
__lowercase =decoder_layerdrop
__lowercase =activation_function
__lowercase =init_std
__lowercase =use_cache
# Informer
__lowercase =attention_type
__lowercase =sampling_factor
__lowercase =distil
super().__init__(is_encoder_decoder=__lowercase , **__lowercase )
@property
def snake_case ( self : Optional[int] ):
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
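

# The default embedding width above is min(50, (cardinality + 1) // 2); a
# quick look at what that yields for a few cardinalities:
def _demo_default_embedding_dims() -> None:
    for cat in (2, 10, 120):
        print(cat, "->", min(50, (cat + 1) // 2))  # 2 -> 1, 10 -> 5, 120 -> 50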
| 119 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ['''OwlViTFeatureExtractor''']
UpperCAmelCase = ['''OwlViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 119 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
__UpperCamelCase : int = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
__UpperCamelCase : List[Any] = TaTokenizerFast
__UpperCamelCase : Dict = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
"MT5EncoderModel",
"MT5ForConditionalGeneration",
"MT5ForQuestionAnswering",
"MT5Model",
"MT5PreTrainedModel",
"MT5Stack",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : str = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
__UpperCamelCase : Tuple = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
module_spec=__spec__,
)
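
# `_LazyModule` above defers the heavy submodule imports until an attribute is
# first accessed (the `extra_objects` entries stay eagerly bound). A minimal
# sketch of the same deferred-import idea, not the actual implementation:
import importlib


class _LazyDemo:
    def __init__(self, name):
        self._name, self._module = name, None

    def __getattr__(self, attr):
        # Import on first attribute access, then delegate.
        if self._module is None:
            self._module = importlib.import_module(self._name)
        return getattr(self._module, attr)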
| 712 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class __magic_name__ ( __lowerCAmelCase , unittest.TestCase):
A: Optional[int] = BarthezTokenizer
A: Optional[int] = BarthezTokenizerFast
A: str = True
A: Optional[int] = True
    def setUp(self):
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer
def UpperCAmelCase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
UpperCamelCase__ : Optional[int] = '''<pad>'''
UpperCamelCase__ : List[str] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) , lowerCamelCase__ )
def UpperCAmelCase__ ( self : int ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(lowerCamelCase__ ) , 101122 )
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def UpperCAmelCase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ : Dict = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
UpperCamelCase__ : Any = [0, 57, 3018, 70307, 91, 2]
UpperCamelCase__ : Optional[int] = self.tokenizer(
lowerCamelCase__ , max_length=len(lowerCamelCase__ ) , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , return_tensors='''pt''' )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
UpperCamelCase__ : List[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
def UpperCAmelCase__ ( self : str ) -> List[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
UpperCamelCase__ : List[Any] = self.get_tokenizer()
UpperCamelCase__ : Optional[int] = self.get_rust_tokenizer()
UpperCamelCase__ : Any = '''I was born in 92000, and this is falsé.'''
UpperCamelCase__ : List[str] = tokenizer.tokenize(lowerCamelCase__ )
UpperCamelCase__ : Tuple = rust_tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ : Tuple = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
UpperCamelCase__ : str = rust_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ : Tuple = self.get_rust_tokenizer()
UpperCamelCase__ : Optional[int] = tokenizer.encode(lowerCamelCase__ )
UpperCamelCase__ : str = rust_tokenizer.encode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
@slow
def UpperCAmelCase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
UpperCamelCase__ : Optional[int] = {'''input_ids''': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model, so we also use French texts.
UpperCamelCase__ : int = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase__ , model_name='''moussaKam/mbarthez''' , revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' , sequences=lowerCamelCase__ , )
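

# Standalone sanity check mirroring the integration test above (downloads the
# public moussaKam/mbarthez checkpoint on first use):
def _demo_barthez_tokenizer() -> None:
    tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
    encoded = tokenizer("A long paragraph for summarization.", return_tensors="pt")
    print(encoded["input_ids"].shape)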
| 106 | 0 |
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
lowercase__ =getLogger(__name__)
lowercase__ ='cuda' if torch.cuda.is_available() else 'cpu'
def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids, attention_mask=batch.attention_mask, **generate_kwargs, )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
    return scores
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 263 |
'''simple docstring'''
import math
def perfect_square(num: int) -> bool:
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
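

# Note: the float-based check above can misfire for very large integers due to
# floating-point rounding; `math.isqrt` gives an exact alternative:
def perfect_square_isqrt(num: int) -> bool:
    return num >= 0 and math.isqrt(num) ** 2 == num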
| 263 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class __SCREAMING_SNAKE_CASE :
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=128,
        max_relative_position=32,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) ->int:
'''simple docstring'''
__a = NezhaModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase )
__a = model(lowerCamelCase , token_type_ids=lowerCamelCase )
__a = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) ->int:
'''simple docstring'''
__a = True
__a = NezhaModel(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , )
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , encoder_hidden_states=lowerCamelCase , )
__a = model(lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) ->Tuple:
'''simple docstring'''
__a = NezhaForMaskedLM(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) ->List[str]:
'''simple docstring'''
__a = NezhaForNextSentencePrediction(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) ->Dict:
'''simple docstring'''
__a = NezhaForPreTraining(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase , next_sentence_label=lowerCamelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) ->Any:
'''simple docstring'''
__a = NezhaForQuestionAnswering(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , start_positions=lowerCamelCase , end_positions=lowerCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) ->List[Any]:
'''simple docstring'''
__a = self.num_labels
__a = NezhaForSequenceClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) ->Union[str, Any]:
'''simple docstring'''
__a = self.num_labels
__a = NezhaForTokenClassification(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) ->Optional[int]:
'''simple docstring'''
__a = self.num_choices
__a = NezhaForMultipleChoice(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
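        # replicate each tensor along a new choices axis: (batch, seq_len) -> (batch, num_choices, seq_len)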
__a = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCamelCase ( self ) ->Any:
'''simple docstring'''
__a = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = __a
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__a =(
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
__a =(
{
"feature-extraction": NezhaModel,
"fill-mask": NezhaForMaskedLM,
"question-answering": NezhaForQuestionAnswering,
"text-classification": NezhaForSequenceClassification,
"token-classification": NezhaForTokenClassification,
"zero-shot": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
__a =True
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=False ) ->Any:
'''simple docstring'''
__a = super()._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase )
if return_labels:
if model_class in get_values(lowerCamelCase ):
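                # all-zero dummy labels are enough for the forward pass to compute a loss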
__a = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowerCamelCase )
__a = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase )
return inputs_dict
def __UpperCamelCase ( self ) ->Dict:
'''simple docstring'''
__a = NezhaModelTester(self )
__a = ConfigTester(self , config_class=lowerCamelCase , hidden_size=37 )
def __UpperCamelCase ( self ) ->Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCamelCase ( self ) ->Optional[int]:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def __UpperCamelCase ( self ) ->str:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase )
def __UpperCamelCase ( self ) ->List[Any]:
'''simple docstring'''
# This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        encoder_hidden_states = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
def __UpperCamelCase ( self ) ->str:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase )
def __UpperCamelCase ( self ) ->Optional[Any]:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCamelCase )
def __UpperCamelCase ( self ) ->Dict:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*lowerCamelCase )
def __UpperCamelCase ( self ) ->List[Any]:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase )
def __UpperCamelCase ( self ) ->int:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase )
def __UpperCamelCase ( self ) ->Optional[Any]:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase )
def __UpperCamelCase ( self ) ->Union[str, Any]:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase )
@slow
def __UpperCamelCase ( self ) ->Optional[int]:
'''simple docstring'''
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = NezhaModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
@slow
@require_torch_gpu
def __UpperCamelCase ( self ) ->List[str]:
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
__a = True
__a = model_class(config=lowerCamelCase )
__a = self._prepare_for_class(lowerCamelCase , lowerCamelCase )
__a = torch.jit.trace(
lowerCamelCase , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowerCamelCase , os.path.join(lowerCamelCase , 'bert.pt' ) )
__a = torch.jit.load(os.path.join(lowerCamelCase , 'bert.pt' ) , map_location=lowerCamelCase )
loaded(inputs_dict['input_ids'].to(lowerCamelCase ) , inputs_dict['attention_mask'].to(lowerCamelCase ) )
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self ) ->List[str]:
'''simple docstring'''
__a = NezhaModel.from_pretrained('sijunhe/nezha-cn-base' )
__a = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__a = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__a = model(lowerCamelCase , attention_mask=lowerCamelCase )[0]
__a = torch.Size((1, 6, 768) )
self.assertEqual(output.shape , lowerCamelCase )
__a = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCamelCase , atol=1e-4 ) )
@slow
def __UpperCamelCase ( self ) ->List[Any]:
'''simple docstring'''
__a = NezhaForMaskedLM.from_pretrained('sijunhe/nezha-cn-base' )
__a = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__a = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__a = model(lowerCamelCase , attention_mask=lowerCamelCase )[0]
__a = torch.Size((1, 6, 2_1128) )
self.assertEqual(output.shape , lowerCamelCase )
__a = torch.tensor(
[[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCamelCase , atol=1e-4 ) )
| 270 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase : Dict = logging.get_logger(__name__)
__UpperCamelCase : Optional[int] = {
"""facebook/xmod-base""": """https://huggingface.co/facebook/xmod-base/resolve/main/config.json""",
"""facebook/xmod-large-prenorm""": """https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json""",
"""facebook/xmod-base-13-125k""": """https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-125k""": """https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-195k""": """https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json""",
"""facebook/xmod-base-60-125k""": """https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json""",
"""facebook/xmod-base-60-265k""": """https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json""",
"""facebook/xmod-base-75-125k""": """https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json""",
"""facebook/xmod-base-75-269k""": """https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json""",
}
class __SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
__a ="xmod"
def __init__( self , lowerCamelCase=3_0522 , lowerCamelCase=768 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=3072 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=1e-12 , lowerCamelCase=1 , lowerCamelCase=0 , lowerCamelCase=2 , lowerCamelCase="absolute" , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=False , lowerCamelCase=2 , lowerCamelCase=False , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=("en_XX",) , lowerCamelCase=None , **lowerCamelCase , ) ->Any:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase )
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = initializer_range
__a = layer_norm_eps
__a = position_embedding_type
__a = use_cache
__a = classifier_dropout
__a = pre_norm
__a = adapter_reduction_factor
__a = adapter_layer_norm
__a = adapter_reuse_layer_norm
__a = ln_before_adapter
__a = list(lowerCamelCase )
__a = default_language
class __SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
@property
def __UpperCamelCase ( self ) ->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
__a = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__a = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
            ] )
| 270 | 1 |
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowercase ( unittest.TestCase):
"""simple docstring"""
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
torch.manual_seed(0 )
UpperCAmelCase_= UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
UpperCAmelCase_= self.dummy_uncond_unet
UpperCAmelCase_= PNDMScheduler()
UpperCAmelCase_= PNDMPipeline(unet=lowercase_ , scheduler=lowercase_ )
pndm.to(lowercase_ )
pndm.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase_= torch.manual_seed(0 )
UpperCAmelCase_= pndm(generator=lowercase_ , num_inference_steps=20 , output_type="""numpy""" ).images
UpperCAmelCase_= torch.manual_seed(0 )
UpperCAmelCase_= pndm(generator=lowercase_ , num_inference_steps=20 , output_type="""numpy""" , return_dict=lowercase_ )[0]
UpperCAmelCase_= image[0, -3:, -3:, -1]
UpperCAmelCase_= image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_= np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class lowercase ( unittest.TestCase):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
UpperCAmelCase_= "google/ddpm-cifar10-32"
UpperCAmelCase_= UNetaDModel.from_pretrained(lowercase_ )
UpperCAmelCase_= PNDMScheduler()
UpperCAmelCase_= PNDMPipeline(unet=lowercase_ , scheduler=lowercase_ )
pndm.to(lowercase_ )
pndm.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase_= torch.manual_seed(0 )
UpperCAmelCase_= pndm(generator=lowercase_ , output_type="""numpy""" ).images
UpperCAmelCase_= image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_= np.array([0.1_564, 0.14_645, 0.1_406, 0.14_715, 0.12_425, 0.14_045, 0.13_115, 0.12_175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 593 |
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class lowercase_ ( __snake_case ):
_lowerCamelCase = ['image_processor']
_lowerCamelCase = 'SamImageProcessor'
def __init__( self , lowercase_ ):
super().__init__(lowercase_ )
_snake_case : Optional[Any] = self.image_processor
_snake_case : Tuple = -10
_snake_case : str = self.image_processor.size["longest_edge"]
def __call__( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_ = None , **lowercase_ , ):
_snake_case : List[Any] = self.image_processor(
lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
        # pop arguments that are not used in the forward pass but are still needed afterwards
_snake_case : Any = encoding_image_processor["original_sizes"]
if hasattr(lowercase_ , "numpy" ): # Checks if Torch or TF tensor
_snake_case : int = original_sizes.numpy()
_snake_case ,_snake_case ,_snake_case : Union[str, Any] = self._check_and_preprocess_points(
input_points=lowercase_ , input_labels=lowercase_ , input_boxes=lowercase_ , )
_snake_case : Dict = self._normalize_and_convert(
lowercase_ , lowercase_ , input_points=lowercase_ , input_labels=lowercase_ , input_boxes=lowercase_ , return_tensors=lowercase_ , )
return encoding_image_processor
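    # Minimal usage sketch (checkpoint name is hypothetical):
    #   processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
    #   inputs = processor(images=image, input_points=[[[450, 600]]], return_tensors="pt")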
def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_="pt" , ):
if input_points is not None:
if len(lowercase_ ) != len(lowercase_ ):
_snake_case : int = [
self._normalize_coordinates(self.target_size , lowercase_ , original_sizes[0] ) for point in input_points
]
else:
_snake_case : Dict = [
self._normalize_coordinates(self.target_size , lowercase_ , lowercase_ )
for point, original_size in zip(lowercase_ , lowercase_ )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
_snake_case ,_snake_case : int = self._pad_points_and_labels(lowercase_ , lowercase_ )
_snake_case : Any = np.array(lowercase_ )
if input_labels is not None:
_snake_case : Optional[Any] = np.array(lowercase_ )
if input_boxes is not None:
if len(lowercase_ ) != len(lowercase_ ):
_snake_case : Optional[Any] = [
self._normalize_coordinates(self.target_size , lowercase_ , original_sizes[0] , is_bounding_box=lowercase_ )
for box in input_boxes
]
else:
_snake_case : List[str] = [
self._normalize_coordinates(self.target_size , lowercase_ , lowercase_ , is_bounding_box=lowercase_ )
for box, original_size in zip(lowercase_ , lowercase_ )
]
_snake_case : Tuple = np.array(lowercase_ )
if input_boxes is not None:
if return_tensors == "pt":
_snake_case : List[str] = torch.from_numpy(lowercase_ )
# boxes batch size of 1 by default
_snake_case : Optional[Any] = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
_snake_case : List[str] = tf.convert_to_tensor(lowercase_ )
# boxes batch size of 1 by default
_snake_case : Optional[int] = tf.expand_dims(lowercase_ , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({"input_boxes": input_boxes} )
if input_points is not None:
if return_tensors == "pt":
_snake_case : Tuple = torch.from_numpy(lowercase_ )
# point batch size of 1 by default
_snake_case : int = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
_snake_case : List[str] = tf.convert_to_tensor(lowercase_ )
# point batch size of 1 by default
_snake_case : Tuple = tf.expand_dims(lowercase_ , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({"input_points": input_points} )
if input_labels is not None:
if return_tensors == "pt":
_snake_case : Dict = torch.from_numpy(lowercase_ )
# point batch size of 1 by default
_snake_case : str = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
_snake_case : Optional[Any] = tf.convert_to_tensor(lowercase_ )
# point batch size of 1 by default
_snake_case : List[Any] = tf.expand_dims(lowercase_ , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({"input_labels": input_labels} )
return encoding_image_processor
def UpperCamelCase ( self , lowercase_ , lowercase_ ):
_snake_case : List[Any] = max([point.shape[0] for point in input_points] )
_snake_case : List[str] = []
for i, point in enumerate(lowercase_ ):
if point.shape[0] != expected_nb_points:
_snake_case : Optional[Any] = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
_snake_case : Union[str, Any] = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(lowercase_ )
_snake_case : Optional[Any] = processed_input_points
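        # Example (hypothetical shapes): point sets of sizes 2 and 3 are both padded to
        # 3 points, filling with (-10, -10) coordinates and -10 labels (point_pad_value).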
return input_points, input_labels
def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=False ):
_snake_case ,_snake_case : Optional[int] = original_size
_snake_case ,_snake_case : List[str] = self.image_processor._get_preprocess_shape(lowercase_ , longest_edge=lowercase_ )
_snake_case : Optional[Any] = deepcopy(lowercase_ ).astype(lowercase_ )
if is_bounding_box:
_snake_case : str = coords.reshape(-1 , 2 , 2 )
_snake_case : Optional[Any] = coords[..., 0] * (new_w / old_w)
_snake_case : Dict = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
_snake_case : Optional[Any] = coords.reshape(-1 , 4 )
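        # Example (hypothetical numbers): with a target size of 1024, the point (500, 500)
        # on a 1000x1000 original image is rescaled to (512.0, 512.0).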
return coords
def UpperCamelCase ( self , lowercase_=None , lowercase_=None , lowercase_=None , ):
if input_points is not None:
if hasattr(lowercase_ , "numpy" ): # Checks for TF or Torch tensor
_snake_case : Union[str, Any] = input_points.numpy().tolist()
if not isinstance(lowercase_ , lowercase_ ) or not isinstance(input_points[0] , lowercase_ ):
raise ValueError("Input points must be a list of list of floating points." )
_snake_case : Any = [np.array(lowercase_ ) for input_point in input_points]
else:
_snake_case : Optional[int] = None
if input_labels is not None:
if hasattr(lowercase_ , "numpy" ):
_snake_case : Tuple = input_labels.numpy().tolist()
if not isinstance(lowercase_ , lowercase_ ) or not isinstance(input_labels[0] , lowercase_ ):
raise ValueError("Input labels must be a list of list integers." )
_snake_case : Tuple = [np.array(lowercase_ ) for label in input_labels]
else:
_snake_case : Optional[Any] = None
if input_boxes is not None:
if hasattr(lowercase_ , "numpy" ):
_snake_case : List[str] = input_boxes.numpy().tolist()
if (
not isinstance(lowercase_ , lowercase_ )
or not isinstance(input_boxes[0] , lowercase_ )
or not isinstance(input_boxes[0][0] , lowercase_ )
):
raise ValueError("Input boxes must be a list of list of list of floating points." )
_snake_case : List[Any] = [np.array(lowercase_ ).astype(np.floataa ) for box in input_boxes]
else:
_snake_case : Optional[int] = None
return input_points, input_labels, input_boxes
@property
def UpperCamelCase ( self ):
_snake_case : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(lowercase_ ) )
def UpperCamelCase ( self , *lowercase_ , **lowercase_ ):
        return self.image_processor.post_process_masks(*lowercase_ , **lowercase_ )
| 670 | 0 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class _A ( unittest.TestCase ):
'''simple docstring'''
_lowercase = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_lowercase = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def __lowerCAmelCase ( self : Union[str, Any] , lowerCamelCase : Tuple , lowerCamelCase : List[str] , lowerCamelCase : Dict )-> str:
snake_case__ : List[Any] = TextaTextGenerationPipeline(model=lowerCamelCase , tokenizer=lowerCamelCase )
return generator, ["Something to write", "Something else"]
def __lowerCAmelCase ( self : Any , lowerCamelCase : int , lowerCamelCase : Any )-> str:
snake_case__ : Tuple = generator("""Something there""" )
self.assertEqual(lowerCamelCase , [{"""generated_text""": ANY(lowerCamelCase )}] )
        # These are encoder-decoder models; they don't just append to the incoming string
self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
snake_case__ : Any = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=lowerCamelCase )
self.assertEqual(
lowerCamelCase , [
[{"""generated_text""": ANY(lowerCamelCase )}, {"""generated_text""": ANY(lowerCamelCase )}],
[{"""generated_text""": ANY(lowerCamelCase )}, {"""generated_text""": ANY(lowerCamelCase )}],
] , )
snake_case__ : int = generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=lowerCamelCase )
self.assertEqual(
lowerCamelCase , [
[{"""generated_text""": ANY(lowerCamelCase )}, {"""generated_text""": ANY(lowerCamelCase )}],
[{"""generated_text""": ANY(lowerCamelCase )}, {"""generated_text""": ANY(lowerCamelCase )}],
] , )
with self.assertRaises(lowerCamelCase ):
generator(4 )
@require_torch
def __lowerCAmelCase ( self : List[str] )-> Optional[int]:
snake_case__ : str = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" )
# do_sample=False necessary for reproducibility
snake_case__ : int = generator("""Something there""" , do_sample=lowerCamelCase )
self.assertEqual(lowerCamelCase , [{"""generated_text""": """"""}] )
snake_case__ : List[Any] = 3
snake_case__ : Any = generator(
"""Something there""" , num_return_sequences=lowerCamelCase , num_beams=lowerCamelCase , )
snake_case__ : int = [
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """"""},
]
self.assertEqual(lowerCamelCase , lowerCamelCase )
snake_case__ : Tuple = generator("""This is a test""" , do_sample=lowerCamelCase , num_return_sequences=2 , return_tensors=lowerCamelCase )
self.assertEqual(
lowerCamelCase , [
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
] , )
snake_case__ : Any = generator.model.config.eos_token_id
snake_case__ : Optional[Any] = """<pad>"""
snake_case__ : Tuple = generator(
["""This is a test""", """This is a second test"""] , do_sample=lowerCamelCase , num_return_sequences=2 , batch_size=2 , return_tensors=lowerCamelCase , )
self.assertEqual(
lowerCamelCase , [
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
] , )
@require_tf
def __lowerCAmelCase ( self : Dict )-> Dict:
snake_case__ : Optional[Any] = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" )
# do_sample=False necessary for reproducibility
snake_case__ : List[str] = generator("""Something there""" , do_sample=lowerCamelCase )
self.assertEqual(lowerCamelCase , [{"""generated_text""": """"""}] )
| 172 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class _A :
'''simple docstring'''
@staticmethod
def __lowerCAmelCase ( *lowerCamelCase : Union[str, Any] , **lowerCamelCase : Union[str, Any] )-> Dict:
pass
def lowerCAmelCase__ ( UpperCAmelCase ):
"""simple docstring"""
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
lowerCAmelCase__ = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class _A ( unittest.TestCase ):
'''simple docstring'''
_lowercase = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def __lowerCAmelCase ( self : Optional[int] , lowerCamelCase : Any , lowerCamelCase : List[str] , lowerCamelCase : Optional[Any] )-> Optional[int]:
snake_case__ : Union[str, Any] = pipeline(
"""document-question-answering""" , model=lowerCamelCase , tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
snake_case__ : int = INVOICE_URL
snake_case__ : List[Any] = list(zip(*apply_tesseract(load_image(lowerCamelCase ) , lowerCamelCase , """""" ) ) )
snake_case__ : Dict = """What is the placebo?"""
snake_case__ : int = [
{
"""image""": load_image(lowerCamelCase ),
"""question""": question,
},
{
"""image""": image,
"""question""": question,
},
{
"""image""": image,
"""question""": question,
"""word_boxes""": word_boxes,
},
]
return dqa_pipeline, examples
def __lowerCAmelCase ( self : int , lowerCamelCase : str , lowerCamelCase : List[str] )-> Union[str, Any]:
snake_case__ : List[Any] = dqa_pipeline(lowerCamelCase , top_k=2 )
self.assertEqual(
lowerCamelCase , [
[
{"""score""": ANY(lowerCamelCase ), """answer""": ANY(lowerCamelCase ), """start""": ANY(lowerCamelCase ), """end""": ANY(lowerCamelCase )},
{"""score""": ANY(lowerCamelCase ), """answer""": ANY(lowerCamelCase ), """start""": ANY(lowerCamelCase ), """end""": ANY(lowerCamelCase )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def __lowerCAmelCase ( self : int )-> List[Any]:
snake_case__ : List[Any] = pipeline("""document-question-answering""" , model="""hf-internal-testing/tiny-random-layoutlmv2""" )
snake_case__ : int = INVOICE_URL
snake_case__ : List[Any] = """How many cats are there?"""
snake_case__ : Optional[int] = [
{"""score""": 0.0_001, """answer""": """oy 2312/2019""", """start""": 38, """end""": 39},
{"""score""": 0.0_001, """answer""": """oy 2312/2019 DUE""", """start""": 38, """end""": 40},
]
snake_case__ : Dict = dqa_pipeline(image=lowerCamelCase , question=lowerCamelCase , top_k=2 )
self.assertEqual(nested_simplify(lowerCamelCase , decimals=4 ) , lowerCamelCase )
snake_case__ : str = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(nested_simplify(lowerCamelCase , decimals=4 ) , lowerCamelCase )
        # No text is detected in this image, so layoutlmv2 should fail here.
        # An empty answer is the expected outcome.
snake_case__ : Optional[Any] = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
snake_case__ : Union[str, Any] = dqa_pipeline(image=lowerCamelCase , question=lowerCamelCase , top_k=2 )
self.assertEqual(lowerCamelCase , [] )
        # We can optionally pass the words and bounding boxes directly
snake_case__ : Tuple = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
snake_case__ : Optional[int] = []
snake_case__ : List[Any] = []
snake_case__ : Tuple = dqa_pipeline(image=lowerCamelCase , question=lowerCamelCase , words=lowerCamelCase , boxes=lowerCamelCase , top_k=2 )
self.assertEqual(lowerCamelCase , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def __lowerCAmelCase ( self : int )-> Any:
snake_case__ : List[str] = pipeline(
"""document-question-answering""" , model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" , revision="""9977165""" , )
snake_case__ : List[Any] = INVOICE_URL
snake_case__ : Optional[Any] = """What is the invoice number?"""
snake_case__ : Any = dqa_pipeline(image=lowerCamelCase , question=lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{"""score""": 0.9_944, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_009, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
snake_case__ : Union[str, Any] = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{"""score""": 0.9_944, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_009, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
snake_case__ : Dict = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
[
{"""score""": 0.9_944, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_009, """answer""": """us-001""", """start""": 16, """end""": 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def __lowerCAmelCase ( self : List[Any] )-> Any:
snake_case__ : Optional[Any] = pipeline(
"""document-question-answering""" , model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" , revision="""9977165""" , max_seq_len=50 , )
snake_case__ : Dict = INVOICE_URL
snake_case__ : Tuple = """What is the invoice number?"""
snake_case__ : Optional[Any] = dqa_pipeline(image=lowerCamelCase , question=lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{"""score""": 0.9_974, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.9_948, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
snake_case__ : int = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{"""score""": 0.9_974, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.9_948, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
snake_case__ : List[Any] = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
[
{"""score""": 0.9_974, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.9_948, """answer""": """us-001""", """start""": 16, """end""": 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def __lowerCAmelCase ( self : List[str] )-> Dict:
snake_case__ : Optional[Any] = AutoTokenizer.from_pretrained(
"""impira/layoutlm-document-qa""" , revision="""3dc6de3""" , add_prefix_space=lowerCamelCase )
snake_case__ : Tuple = pipeline(
"""document-question-answering""" , model="""impira/layoutlm-document-qa""" , tokenizer=lowerCamelCase , revision="""3dc6de3""" , )
snake_case__ : Optional[int] = INVOICE_URL
snake_case__ : Union[str, Any] = """What is the invoice number?"""
snake_case__ : Dict = dqa_pipeline(image=lowerCamelCase , question=lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{"""score""": 0.4_251, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_819, """answer""": """1110212019""", """start""": 23, """end""": 23},
] , )
snake_case__ : int = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{"""score""": 0.4_251, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_819, """answer""": """1110212019""", """start""": 23, """end""": 23},
] , )
snake_case__ : int = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
[
{"""score""": 0.4_251, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_819, """answer""": """1110212019""", """start""": 23, """end""": 23},
]
]
* 2 , )
snake_case__ : Tuple = list(zip(*apply_tesseract(load_image(lowerCamelCase ) , lowerCamelCase , """""" ) ) )
# This model should also work if `image` is set to None
snake_case__ : Dict = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{"""score""": 0.4_251, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0_819, """answer""": """1110212019""", """start""": 23, """end""": 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def __lowerCAmelCase ( self : int )-> str:
snake_case__ : Dict = AutoTokenizer.from_pretrained(
"""impira/layoutlm-document-qa""" , revision="""3dc6de3""" , add_prefix_space=lowerCamelCase )
snake_case__ : List[Any] = pipeline(
"""document-question-answering""" , model="""impira/layoutlm-document-qa""" , tokenizer=lowerCamelCase , revision="""3dc6de3""" , max_seq_len=50 , )
snake_case__ : Any = INVOICE_URL
snake_case__ : List[Any] = """What is the invoice number?"""
snake_case__ : int = dqa_pipeline(image=lowerCamelCase , question=lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{"""score""": 0.9_999, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.9_998, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
snake_case__ : str = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
[
{"""score""": 0.9_999, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.9_998, """answer""": """us-001""", """start""": 16, """end""": 16},
]
]
* 2 , )
snake_case__ : Dict = list(zip(*apply_tesseract(load_image(lowerCamelCase ) , lowerCamelCase , """""" ) ) )
# This model should also work if `image` is set to None
snake_case__ : Optional[Any] = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=4 ) , [
{"""score""": 0.9_999, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.9_998, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
@slow
@require_torch
def __lowerCAmelCase ( self : int )-> Tuple:
snake_case__ : Optional[int] = pipeline(
"""document-question-answering""" , model="""naver-clova-ix/donut-base-finetuned-docvqa""" , tokenizer=AutoTokenizer.from_pretrained("""naver-clova-ix/donut-base-finetuned-docvqa""" ) , feature_extractor="""naver-clova-ix/donut-base-finetuned-docvqa""" , )
snake_case__ : str = INVOICE_URL
snake_case__ : Tuple = """What is the invoice number?"""
snake_case__ : Tuple = dqa_pipeline(image=lowerCamelCase , question=lowerCamelCase , top_k=2 )
self.assertEqual(nested_simplify(lowerCamelCase , decimals=4 ) , [{"""answer""": """us-001"""}] )
@require_tf
@unittest.skip("""Document question answering not implemented in TF""" )
def __lowerCAmelCase ( self : int )-> List[Any]:
pass
| 172 | 1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
UpperCAmelCase_ = get_tests_dir("fixtures")
UpperCAmelCase_ = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
UpperCAmelCase_ = get_tests_dir("fixtures/dummy-config.json")
class __UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase( self ):
_UpperCAmelCase = 0
def UpperCamelCase( self ):
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
def UpperCamelCase( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(_UpperCamelCase ).to_dict()
config_dict.pop('''feature_extractor_type''' )
_UpperCAmelCase = WavaVecaFeatureExtractor(**_UpperCamelCase )
# save in new folder
model_config.save_pretrained(_UpperCamelCase )
config.save_pretrained(_UpperCamelCase )
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(_UpperCamelCase )
# make sure private variable is not incorrectly saved
_UpperCAmelCase = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
def UpperCamelCase( self ):
with self.assertRaisesRegex(
_UpperCamelCase , '''bert-base is not a local folder and is not a valid model identifier''' ):
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained('''bert-base''' )
def UpperCamelCase( self ):
with self.assertRaisesRegex(
_UpperCamelCase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(_UpperCamelCase , revision='''aaaaaa''' )
def UpperCamelCase( self ):
with self.assertRaisesRegex(
_UpperCamelCase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained('''hf-internal-testing/config-no-model''' )
def UpperCamelCase( self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_UpperCamelCase ):
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_UpperCamelCase ):
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=_UpperCamelCase )
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=_UpperCamelCase )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_UpperCamelCase )
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(_UpperCamelCase , trust_remote_code=_UpperCamelCase )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
def UpperCamelCase( self ):
try:
AutoConfig.register('''custom''' , _UpperCamelCase )
AutoFeatureExtractor.register(_UpperCamelCase , _UpperCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_UpperCamelCase ):
AutoFeatureExtractor.register(_UpperCamelCase , _UpperCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
_UpperCAmelCase = CustomFeatureExtractor.from_pretrained(_UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_UpperCamelCase )
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def UpperCamelCase( self ):
class __UpperCamelCase ( A__ ):
__A : Dict = True
try:
AutoConfig.register('''custom''' , _UpperCamelCase )
AutoFeatureExtractor.register(_UpperCamelCase , _UpperCamelCase )
# If remote code is not set, the default is to use local
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=_UpperCamelCase )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
_UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=_UpperCamelCase )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
self.assertTrue(not hasattr(_UpperCamelCase , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 32 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class a ( UpperCAmelCase__ ):
UpperCamelCase : Any = 'Speech2TextFeatureExtractor'
UpperCamelCase : Optional[Any] = 'Speech2TextTokenizer'
def __init__( self : Optional[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =self.feature_extractor
SCREAMING_SNAKE_CASE_: List[Any] =False
def __call__( self : Dict , *lowerCAmelCase : str , **lowerCAmelCase : str ) -> str:
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*lowerCAmelCase , **lowerCAmelCase )
if "raw_speech" in kwargs:
warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
SCREAMING_SNAKE_CASE_: Tuple =kwargs.pop("""raw_speech""" )
else:
SCREAMING_SNAKE_CASE_: int =kwargs.pop("""audio""" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =kwargs.pop("""sampling_rate""" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =kwargs.pop("""text""" , lowerCAmelCase )
if len(lowerCAmelCase ) > 0:
SCREAMING_SNAKE_CASE_: List[str] =args[0]
SCREAMING_SNAKE_CASE_: List[str] =args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if audio is not None:
SCREAMING_SNAKE_CASE_: Optional[int] =self.feature_extractor(lowerCAmelCase , *lowerCAmelCase , sampling_rate=lowerCAmelCase , **lowerCAmelCase )
if text is not None:
SCREAMING_SNAKE_CASE_: Union[str, Any] =self.tokenizer(lowerCAmelCase , **lowerCAmelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
SCREAMING_SNAKE_CASE_: Any =encodings["""input_ids"""]
return inputs
def lowerCamelCase__ ( self : Any , *lowerCAmelCase : Any , **lowerCAmelCase : Tuple ) -> Any:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCAmelCase , **lowerCAmelCase )
def lowerCamelCase__ ( self : Tuple , *lowerCAmelCase : Any , **lowerCAmelCase : int ) -> Any:
'''simple docstring'''
return self.tokenizer.decode(*lowerCAmelCase , **lowerCAmelCase )
@contextmanager
def lowerCamelCase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your audio inputs, or in a separate call.""" )
SCREAMING_SNAKE_CASE_: Optional[Any] =True
SCREAMING_SNAKE_CASE_: Dict =self.tokenizer
yield
SCREAMING_SNAKE_CASE_: int =self.feature_extractor
SCREAMING_SNAKE_CASE_: str =False
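# Minimal usage sketch (inputs are hypothetical): audio and text can be passed in a
# single call; in the upstream implementation the tokenized text is stored as labels.
#   inputs = processor(audio=waveform, sampling_rate=16_000, text="a transcript")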
| 409 | 0 |
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = '''T5Config'''
def __lowerCAmelCase ( input_ids: jnp.ndarray , pad_token_id: int , decoder_start_token_id: int ) -> jnp.ndarray:
    # shift input ids one position to the right, inserting the decoder start token in front
    shifted_input_ids = jnp.zeros_like(input_ids )
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id )
    # replace -100 loss-masking placeholders with the pad token id
    shifted_input_ids = jnp.where(shifted_input_ids == -1_0_0 , pad_token_id , shifted_input_ids )
    return shifted_input_ids
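# Example (hypothetical values): input_ids [[5, 6, 7]] with decoder_start_token_id 0
# shifts to [[0, 5, 6]]; any -100 placeholders are then replaced by pad_token_id.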
class FlaxMTaModel( FlaxTaModel ):
    model_type = '''mt5'''
    config_class = MTaConfig
class FlaxMTaEncoderModel( FlaxTaEncoderModel ):
    model_type = '''mt5'''
    config_class = MTaConfig
class FlaxMTaForConditionalGeneration( FlaxTaForConditionalGeneration ):
    model_type = '''mt5'''
    config_class = MTaConfig
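# Minimal usage sketch (checkpoint name is hypothetical):
#   model = FlaxMTaModel.from_pretrained("google/mt5-small")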
| 713 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __A( unittest.TestCase ):
def lowercase__ ( self : Optional[Any] ):
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
lowerCamelCase_ = [[1, 2, 4], [1, 2, 3, 4]]
lowerCamelCase_ = DisjunctiveConstraint(__UpperCamelCase )
self.assertTrue(isinstance(dc.token_ids , __UpperCamelCase ) )
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def lowercase__ ( self : List[Any] ):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
lowerCamelCase_ = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint(__UpperCamelCase ) # fails here
def lowercase__ ( self : Union[str, Any] ):
lowerCamelCase_ = [[1, 2, 3], [1, 2, 4]]
lowerCamelCase_ = DisjunctiveConstraint(__UpperCamelCase )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = dc.update(1 )
lowerCamelCase_ = stepped is True and completed is False and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = dc.update(2 )
lowerCamelCase_ = stepped is True and completed is False and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = dc.update(3 )
lowerCamelCase_ = stepped is True and completed is True and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def lowercase__ ( self : Tuple ):
lowerCamelCase_ = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
lowerCamelCase_ = DisjunctiveConstraint(__UpperCamelCase )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
| 103 | 0 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
_snake_case = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
_snake_case = (
subprocess.check_output(f'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode("utf-8").split()
)
_snake_case = "|".join(sys.argv[1:])
_snake_case = re.compile(Rf'''^({joined_dirs}).*?\.py$''')
_snake_case = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 500 |
def A ( number ):
    '''simple docstring'''
    if not isinstance(number , int ):
        msg = F"Input value of [number={number}] must be an integer"
        raise TypeError(msg )
    if number < 1:
        msg = F"Input value of [number={number}] must be > 0"
        raise ValueError(msg )
    # iteratively apply the Catalan recurrence C(i) = C(i - 1) * (4 * i - 2) / (i + 1)
    current_number = 1
    for i in range(1 , number ):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
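# Example: A(1) == 1, A(2) == 1, A(3) == 2 and A(5) == 14, i.e. A(n) walks the
# Catalan sequence 1, 1, 2, 5, 14, 42, ... starting from C(0) at n == 1.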
if __name__ == "__main__":
import doctest
doctest.testmod()
| 500 | 1 |
"""simple docstring"""
class a :
"""simple docstring"""
    def __init__( self: Any , set_counts: list ):
        """simple docstring"""
        self.set_counts = set_counts
        self.max_set = max(set_counts )
        num_sets = len(set_counts )
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets ) )
    def merge( self: Any , src: int , dst: int ):
        """simple docstring"""
        src_parent = self.get_parent(src )
        dst_parent = self.get_parent(dst )
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            # union by rank: attach the lower-ranked tree under the higher-ranked one
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set , joined_set_size )
        return True
    def get_parent( self: Tuple , disj_set: int ):
        """simple docstring"""
        if self.parents[disj_set] == disj_set:
            return disj_set
        # path compression: point this node directly at its root
        self.parents[disj_set] = self.get_parent(self.parents[disj_set] )
        return self.parents[disj_set]
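# Minimal usage sketch (hypothetical counts): merging two of three sets combines
# their counts and updates the running maximum set size.
#   ds = a([1, 2, 3])
#   ds.merge(1, 2)     # True: counts 2 and 3 combine into a set of size 5
#   print(ds.max_set)  # 5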
| 500 |
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch :
    """simple docstring"""
    def __init__( self: Any , text: str , pattern: str ):
        """simple docstring"""
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text ), len(pattern )
    def match_in_pattern( self: Dict , char: str ):
        """simple docstring"""
        # return the right-most index of char in the pattern, or -1 if absent
        for i in range(self.patLen - 1 , -1 , -1 ):
            if char == self.pattern[i]:
                return i
        return -1
    def mismatch_in_text( self: str , current_pos: int ):
        """simple docstring"""
        # return the text index of the right-most mismatch for this alignment, or -1 on a full match
        for i in range(self.patLen - 1 , -1 , -1 ):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1
    def bad_character_heuristic( self: List[Any] ):
        """simple docstring"""
        positions = []
        for i in range(self.textLen - self.patLen + 1 ):
            mismatch_index = self.mismatch_in_text(i )
            if mismatch_index == -1:
                positions.append(i )
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index] )
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
text = 'ABAABA'
pattern = 'AB'
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
    print('No match found')
else:
    print('Pattern found in following positions: ')
    print(positions)
| 500 | 1 |
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def UpperCAmelCase_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
# Load configuration defined in the metadata file
with open(_UpperCAmelCase ) as metadata_file:
lowerCamelCase_: Optional[int] = json.load(_UpperCAmelCase )
lowerCamelCase_: List[Any] = LukeConfig(use_entity_aware_attention=_UpperCAmelCase , **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
lowerCamelCase_: List[str] = torch.load(_UpperCAmelCase , map_location="""cpu""" )
# Load the entity vocab file
lowerCamelCase_: Union[str, Any] = load_entity_vocab(_UpperCAmelCase )
lowerCamelCase_: str = RobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
    entity_token_a = AddedToken("""<ent>""" , lstrip=False , rstrip=False )
    entity_token_b = AddedToken("""<ent2>""" , lstrip=False , rstrip=False )
    tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_b]} )
config.vocab_size += 2
print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , LukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
lowerCamelCase_: Optional[int] = LukeTokenizer.from_pretrained(_UpperCAmelCase )
# Initialize the embeddings of the special tokens
lowerCamelCase_: Union[str, Any] = state_dict["""embeddings.word_embeddings.weight"""]
lowerCamelCase_: Tuple = word_emb[tokenizer.convert_tokens_to_ids(["""@"""] )[0]].unsqueeze(0 )
lowerCamelCase_: Dict = word_emb[tokenizer.convert_tokens_to_ids(["""#"""] )[0]].unsqueeze(0 )
lowerCamelCase_: Dict = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
lowerCamelCase_: Any = f"""encoder.layer.{layer_index}.attention.self."""
lowerCamelCase_: Tuple = state_dict[prefix + matrix_name]
lowerCamelCase_: int = state_dict[prefix + matrix_name]
lowerCamelCase_: Optional[Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
lowerCamelCase_: Union[str, Any] = state_dict["""entity_embeddings.entity_embeddings.weight"""]
lowerCamelCase_: int = entity_emb[entity_vocab["""[MASK]"""]]
lowerCamelCase_: List[str] = LukeModel(config=_UpperCAmelCase ).eval()
lowerCamelCase_ , lowerCamelCase_: List[Any] = model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
if not (len(_UpperCAmelCase ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(f"""Missing keys {', '.join(_UpperCAmelCase )}. Expected only missing embeddings.position_ids""" )
if not (all(key.startswith("""entity_predictions""" ) or key.startswith("""lm_head""" ) for key in unexpected_keys )):
raise ValueError(
"""Unexpected keys"""
f""" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))] )}""" )
# Check outputs
lowerCamelCase_: Union[str, Any] = LukeTokenizer.from_pretrained(_UpperCAmelCase , task="""entity_classification""" )
lowerCamelCase_: str = (
"""Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"""
""" new world number one avoid a humiliating second- round exit at Wimbledon ."""
)
lowerCamelCase_: str = (3_9, 4_2)
lowerCamelCase_: int = tokenizer(_UpperCAmelCase , entity_spans=[span] , add_prefix_space=_UpperCAmelCase , return_tensors="""pt""" )
lowerCamelCase_: Any = model(**_UpperCAmelCase )
# Verify word hidden states
if model_size == "large":
lowerCamelCase_: str = torch.Size((1, 4_2, 1_0_2_4) )
lowerCamelCase_: Any = torch.tensor(
[[0.0_1_3_3, 0.0_8_6_5, 0.0_0_9_5], [0.3_0_9_3, -0.2_5_7_6, -0.7_4_1_8], [-0.1_7_2_0, -0.2_1_1_7, -0.2_8_6_9]] )
else: # base
lowerCamelCase_: int = torch.Size((1, 4_2, 7_6_8) )
lowerCamelCase_: List[Any] = torch.tensor([[0.0_0_3_7, 0.1_3_6_8, -0.0_0_9_1], [0.1_0_9_9, 0.3_3_2_9, -0.1_0_9_5], [0.0_7_6_5, 0.5_3_3_5, 0.1_1_7_9]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , _UpperCAmelCase , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
lowerCamelCase_: str = torch.Size((1, 1, 1_0_2_4) )
lowerCamelCase_: Tuple = torch.tensor([[0.0_4_6_6, -0.0_1_0_6, -0.0_1_7_9]] )
else: # base
lowerCamelCase_: List[Any] = torch.Size((1, 1, 7_6_8) )
lowerCamelCase_: int = torch.tensor([[0.1_4_5_7, 0.1_0_4_4, 0.0_1_7_4]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
f""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , _UpperCAmelCase , atol=1E-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(_UpperCAmelCase ) )
model.save_pretrained(_UpperCAmelCase )
def UpperCAmelCase_ ( _UpperCAmelCase ):
lowerCamelCase_: int = {}
with open(_UpperCAmelCase , """r""" , encoding="""utf-8""" ) as f:
for index, line in enumerate(_UpperCAmelCase ):
lowerCamelCase_ , lowerCamelCase_: str = line.rstrip().split("""\t""" )
lowerCamelCase_: Optional[Any] = index
return entity_vocab
if __name__ == "__main__":
lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
lowercase : Dict = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
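# The entity-vocab loader defined above parses a tab-separated
# "<title>\t<count>" file into {title: line_index}, ignoring the second
# column. A toy illustration of that parsing (hypothetical file contents,
# not the real LUKE entity vocabulary):
import io

def _load_entity_vocab_demo() -> dict:
    fake_tsv = io.StringIO("[PAD]\t0\n[UNK]\t0\n[MASK]\t0\nAna Ivanovic\t12\n")
    entity_vocab = {}
    for index, line in enumerate(fake_tsv):
        title, _count = line.rstrip().split("\t")
        entity_vocab[title] = index
    return entity_vocab

assert _load_entity_vocab_demo()["[MASK]"] == 2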
| 423 |
def UpperCAmelCase_ ( _UpperCAmelCase ):
lowerCamelCase_: Any = current_set.copy()
for row_index, row in enumerate(_UpperCAmelCase ):
lowerCamelCase_: Optional[Any] = row[0]
for column_index, column in enumerate(_UpperCAmelCase ):
if magnitude == 0:
lowerCamelCase_: Union[str, Any] = column
continue
lowerCamelCase_: Any = column / magnitude
# Subtract to cancel term
lowerCamelCase_: str = current_set[0]
lowerCamelCase_: Union[str, Any] = [first_row]
lowerCamelCase_: Optional[int] = current_set[1::]
for row in current_set:
lowerCamelCase_: List[Any] = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(_UpperCAmelCase )
continue
for column_index in range(len(_UpperCAmelCase ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(_UpperCAmelCase )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
lowerCamelCase_: Dict = final_set[0]
lowerCamelCase_: List[str] = []
lowerCamelCase_: Union[str, Any] = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
lowerCamelCase_: Any = simplify(_UpperCAmelCase )
for i in range(len(_UpperCAmelCase ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , _UpperCAmelCase )
lowerCamelCase_: Dict = resultant
return final_set
def UpperCAmelCase_ ( _UpperCAmelCase ):
if len(_UpperCAmelCase ) == 0:
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
lowerCamelCase_: Optional[Any] = len(_UpperCAmelCase ) + 1
if any(len(_UpperCAmelCase ) != _length for item in equations ):
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
for row in equations:
if any(not isinstance(_UpperCAmelCase , (int, float) ) for column in row ):
raise ValueError("""solve_simultaneous() requires lists of integers""" )
if len(_UpperCAmelCase ) == 1:
return [equations[0][-1] / equations[0][0]]
lowerCamelCase_: Tuple = equations.copy()
if any(0 in row for row in data_set ):
lowerCamelCase_: Tuple = data_set.copy()
lowerCamelCase_: Tuple = []
for row_index, row in enumerate(_UpperCAmelCase ):
if 0 not in row:
lowerCamelCase_: Optional[int] = data_set.pop(_UpperCAmelCase )
break
if not full_row:
raise ValueError("""solve_simultaneous() requires at least 1 full equation""" )
data_set.insert(0 , _UpperCAmelCase )
lowerCamelCase_: List[Any] = data_set.copy()
lowerCamelCase_: str = simplify(_UpperCAmelCase )
lowerCamelCase_: Union[str, Any] = simplified[::-1]
lowerCamelCase_: list = []
for row in simplified:
lowerCamelCase_: Any = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
lowerCamelCase_: List[str] = row.copy()[: len(_UpperCAmelCase ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(_UpperCAmelCase ) == 0:
solutions.append(0 )
continue
lowerCamelCase_: Optional[int] = temp_row[1::]
lowerCamelCase_: Optional[Any] = temp_row[::-1]
for column_index, column in enumerate(_UpperCAmelCase ):
current_solution -= column * solutions[column_index]
solutions.append(_UpperCAmelCase )
lowerCamelCase_: List[Any] = []
for item in solutions:
final.append(float(round(_UpperCAmelCase , 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase : str = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
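# The solver above performs recursive Gaussian elimination on augmented rows
# [a1, ..., an, b]. The same 5x5 system cross-checked with numpy (assuming
# numpy is available; this check is not part of the original snippet):
import numpy as np

_demo_eq = [
    [2, 1, 1, 1, 1, 4],
    [1, 2, 1, 1, 1, 5],
    [1, 1, 2, 1, 1, 6],
    [1, 1, 1, 2, 1, 7],
    [1, 1, 1, 1, 2, 8],
]
_A = np.array([row[:-1] for row in _demo_eq], dtype=float)  # coefficients
_b = np.array([row[-1] for row in _demo_eq], dtype=float)   # constants
print(np.linalg.solve(_A, _b))  # [-1.  0.  1.  2.  3.]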
| 423 | 1 |
from __future__ import annotations
__A = 1.6_0_2_1e-1_9 # units = C
def lowerCamelCase_ ( UpperCamelCase__ : float , UpperCamelCase__ : float , UpperCamelCase__ : float , ) -> tuple[str, float]:
"""simple docstring"""
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif conductivity < 0:
raise ValueError('Conductivity cannot be negative' )
elif electron_conc < 0:
raise ValueError('Electron concentration cannot be negative' )
elif mobility < 0:
raise ValueError('mobility cannot be negative' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
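# The function above solves conductivity = mobility * electron_conc *
# ELECTRON_CHARGE for whichever argument is passed as 0. A standalone algebra
# check with toy values (illustrative numbers, not data for any real material):
_q = 1.6021e-19                      # elementary charge, C
_mobility, _n = 100.0, 1e19          # toy mobility and carrier concentration
_sigma = _mobility * _n * _q         # forward relation
assert abs(_sigma / (_n * _q) - _mobility) < 1e-9         # recover mobility
assert abs(_sigma / (_mobility * _q) / _n - 1.0) < 1e-12  # recover concentration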
| 167 |
import requests
__A = "" # <-- Put your OpenWeatherMap appid here!
__A = "https://api.openweathermap.org/data/2.5/"
def lowerCamelCase_ ( UpperCamelCase__ : str = "Chicago" , UpperCamelCase__ : str = APPID ) -> dict:
"""simple docstring"""
return requests.get(URL_BASE + 'weather' , params=locals() ).json()
def lowerCamelCase_ ( UpperCamelCase__ : str = "Kolkata, India" , UpperCamelCase__ : str = APPID ) -> dict:
"""simple docstring"""
return requests.get(URL_BASE + 'forecast' , params=locals() ).json()
def lowerCamelCase_ ( UpperCamelCase__ : float = 55.68 , UpperCamelCase__ : float = 12.57 , UpperCamelCase__ : str = APPID ) -> dict:
"""simple docstring"""
return requests.get(URL_BASE + 'onecall' , params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
__A = input("Enter a location:").strip()
if location:
pprint(current_weather(location))
else:
break
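# The params=locals() trick above works because each helper's local namespace
# at that point contains exactly its keyword arguments, which requests encodes
# into the query string. A no-network illustration of the same idea:
from urllib.parse import urlencode

def _query_demo(q: str = "Chicago", appid: str = "KEY") -> str:
    return urlencode(locals())  # the same dict requests receives via params=

assert "q=Chicago" in _query_demo() and "appid=KEY" in _query_demo()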
| 167 | 1 |
"""simple docstring"""
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def lowercase__ ( lowerCamelCase ):
if not is_accelerate_available():
return method
_SCREAMING_SNAKE_CASE : Optional[int] = version.parse(accelerate.__version__ ).base_version
if version.parse(lowerCamelCase ) < version.parse('0.17.0' ):
return method
def wrapper(self, *lowerCamelCase, **lowerCamelCase ):
if hasattr(self, '_hf_hook' ) and hasattr(self._hf_hook, 'pre_forward' ):
self._hf_hook.pre_forward(self )
return method(self, *lowerCamelCase, **lowerCamelCase )
return wrapper
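# The decorator above only wraps a method when accelerate is new enough and an
# offload hook is attached. The gating pattern itself, stripped of the
# accelerate specifics (a sketch, not the library's API):
def _guarded(method):
    def wrapper(self, *args, **kwargs):
        hook = getattr(self, "_pre_hook", None)
        if hook is not None:
            hook(self)  # fire the hook before the real call
        return method(self, *args, **kwargs)
    return wrapper

class _Demo:
    @_guarded
    def forward(self, x):
        return x + 1

assert _Demo().forward(1) == 2  # no hook attached, falls straight through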
| 621 |
"""simple docstring"""
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
lowerCAmelCase__ = threading.Lock()
lowerCAmelCase__ = None
lowerCAmelCase__ = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
lowerCAmelCase__ = logging.WARNING
lowerCAmelCase__ = True
def lowercase__ ( ):
_SCREAMING_SNAKE_CASE : int = os.getenv('TRANSFORMERS_VERBOSITY', lowerCamelCase )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f"""Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, """
f"""has to be one of: { ", ".join(log_levels.keys() ) }""" )
return _default_log_level
def lowercase__ ( ):
return __name__.split('.' )[0]
def lowercase__ ( ):
return logging.getLogger(_get_library_name() )
def lowercase__ ( ):
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
_SCREAMING_SNAKE_CASE : Optional[int] = logging.StreamHandler() # Set sys.stderr as stream.
_SCREAMING_SNAKE_CASE : Tuple = sys.stderr.flush
# Apply our default configuration to the library root logger.
_SCREAMING_SNAKE_CASE : Union[str, Any] = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
_SCREAMING_SNAKE_CASE : int = False
def lowercase__ ( ):
global _default_handler
with _lock:
if not _default_handler:
return
_SCREAMING_SNAKE_CASE : Any = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
_SCREAMING_SNAKE_CASE : Optional[Any] = None
def lowercase__ ( ):
return log_levels
def lowercase__ ( lowerCamelCase = None ):
if name is None:
_SCREAMING_SNAKE_CASE : Tuple = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(lowerCamelCase )
def lowercase__ ( ):
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def lowercase__ ( lowerCamelCase ):
_configure_library_root_logger()
_get_library_root_logger().setLevel(lowerCamelCase )
def lowercase__ ( ):
return set_verbosity(lowerCamelCase )
def lowercase__ ( ):
return set_verbosity(lowerCamelCase )
def lowercase__ ( ):
return set_verbosity(lowerCamelCase )
def lowercase__ ( ):
return set_verbosity(lowerCamelCase )
def lowercase__ ( ):
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def lowercase__ ( ):
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def lowercase__ ( lowerCamelCase ):
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(lowerCamelCase )
def lowercase__ ( lowerCamelCase ):
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(lowerCamelCase )
def lowercase__ ( ):
_configure_library_root_logger()
_SCREAMING_SNAKE_CASE : List[str] = False
def lowercase__ ( ):
_configure_library_root_logger()
_SCREAMING_SNAKE_CASE : int = True
def lowercase__ ( ):
_SCREAMING_SNAKE_CASE : Any = _get_library_root_logger().handlers
for handler in handlers:
_SCREAMING_SNAKE_CASE : Optional[int] = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s' )
handler.setFormatter(lowerCamelCase )
def lowercase__ ( ):
_SCREAMING_SNAKE_CASE : List[str] = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(lowerCamelCase )
def lowercase__ ( self, *lowerCamelCase, **lowerCamelCase ):
_SCREAMING_SNAKE_CASE : str = os.getenv('TRANSFORMERS_NO_ADVISORY_WARNINGS', lowerCamelCase )
if no_advisory_warnings:
return
self.warning(*lowerCamelCase, **lowerCamelCase )
lowerCAmelCase__ = warning_advice
@functools.lru_cache(lowerCamelCase )
def lowercase__ ( self, *lowerCamelCase, **lowerCamelCase ):
self.warning(*lowerCamelCase, **lowerCamelCase )
lowerCAmelCase__ = warning_once
class _lowerCAmelCase :
def __init__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[int]: # pylint: disable=unused-argument
_SCREAMING_SNAKE_CASE : Tuple = args[0] if args else None
def __iter__( self ) -> Dict:
return iter(self._iterator )
def __getattr__( self , lowerCAmelCase_ ) -> Tuple:
def empty_fn(*lowerCAmelCase_ , **lowerCAmelCase_ ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self ) -> List[Any]:
return self
def __exit__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]:
return
class _lowerCAmelCase :
def __call__( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Optional[Any]:
if _tqdm_active:
return tqdm_lib.tqdm(*lowerCAmelCase_ , **lowerCAmelCase_ )
else:
return EmptyTqdm(*lowerCAmelCase_ , **lowerCAmelCase_ )
def A ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[str]:
_SCREAMING_SNAKE_CASE : Optional[int] = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*lowerCAmelCase_ , **lowerCAmelCase_ )
def A ( self ) -> Any:
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
lowerCAmelCase__ = _tqdm_cls()
def lowercase__ ( ):
global _tqdm_active
return bool(_tqdm_active )
def lowercase__ ( ):
global _tqdm_active
_SCREAMING_SNAKE_CASE : int = True
hf_hub_utils.enable_progress_bars()
def lowercase__ ( ):
global _tqdm_active
_SCREAMING_SNAKE_CASE : Optional[Any] = False
hf_hub_utils.disable_progress_bars()
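# Typical use of the verbosity helpers defined above, written against the
# public names they carry upstream in transformers.utils.logging (shown as a
# comment sketch, since this obfuscated copy renames the functions):
#
#     from transformers.utils import logging
#     logging.set_verbosity_info()          # or logging.set_verbosity(logging.INFO)
#     logger = logging.get_logger("transformers")
#     logger.info("visible at INFO verbosity and below")
#     logging.disable_progress_bars()       # silence the tqdm wrapper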
| 621 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
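# _LazyModule above defers heavy imports until an attribute is first touched.
# Plain Python achieves the same effect with a module-level __getattr__
# (PEP 562); a minimal sketch with a toy name-to-module mapping:
import importlib

_LAZY_TARGETS = {"json_tools": "json"}  # public name -> real module (toy)

def __getattr__(name):
    if name in _LAZY_TARGETS:
        return importlib.import_module(_LAZY_TARGETS[name])
    raise AttributeError(name)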
| 719 |
"""simple docstring"""
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
__lowercase = logging.get_logger(__name__)
@add_end_docstrings(
_a ,r"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" ,)
class _A ( _a ):
"""simple docstring"""
def __snake_case ( self : Optional[int] , __UpperCAmelCase : GenericTensor):
if self.framework == "tf":
a : Dict = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
elif self.framework == "pt":
a : str = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__UpperCAmelCase)
else:
raise ValueError("Unsupported framework")
return masked_index
def __snake_case ( self : Optional[int] , __UpperCAmelCase : GenericTensor):
a : Union[str, Any] = self.get_masked_index(__UpperCAmelCase)
a : Dict = np.prod(masked_index.shape)
if numel < 1:
raise PipelineException(
"fill-mask" , self.model.base_model_prefix , f'''No mask_token ({self.tokenizer.mask_token}) found on the input''' , )
def __snake_case ( self : List[str] , __UpperCAmelCase : GenericTensor):
if isinstance(__UpperCAmelCase , __UpperCAmelCase):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(__UpperCAmelCase)
def __snake_case ( self : Optional[Any] , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any]=None , **__UpperCAmelCase : Optional[int]):
if return_tensors is None:
a : Optional[int] = self.framework
a : int = self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase)
self.ensure_exactly_one_mask_token(__UpperCAmelCase)
return model_inputs
def __snake_case ( self : Optional[Any] , __UpperCAmelCase : Tuple):
a : Dict = self.model(**__UpperCAmelCase)
a : Optional[int] = model_inputs["input_ids"]
return model_outputs
def __snake_case ( self : Optional[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int=5 , __UpperCAmelCase : int=None):
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
a : Optional[int] = target_ids.shape[0]
a : Dict = model_outputs["input_ids"][0]
a : int = model_outputs["logits"]
if self.framework == "tf":
a : Union[str, Any] = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]
a : Tuple = outputs.numpy()
a : Optional[Any] = outputs[0, masked_index, :]
a : int = stable_softmax(__UpperCAmelCase , axis=-1)
if target_ids is not None:
a : List[str] = tf.gather_nd(tf.squeeze(__UpperCAmelCase , 0) , target_ids.reshape(-1 , 1))
a : Union[str, Any] = tf.expand_dims(__UpperCAmelCase , 0)
a : List[Any] = tf.math.top_k(__UpperCAmelCase , k=__UpperCAmelCase)
a , a : Optional[Any] = topk.values.numpy(), topk.indices.numpy()
else:
a : Any = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__UpperCAmelCase).squeeze(-1)
# Fill mask pipeline supports only one ${mask_token} per sample
a : List[str] = outputs[0, masked_index, :]
a : int = logits.softmax(dim=-1)
if target_ids is not None:
a : Any = probs[..., target_ids]
a , a : Union[str, Any] = probs.topk(__UpperCAmelCase)
a : Optional[int] = []
a : List[Any] = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist())):
a : Dict = []
for v, p in zip(_values , _predictions):
# Copy is important since we're going to modify this array in place
a : List[str] = input_ids.numpy().copy()
if target_ids is not None:
a : List[Any] = target_ids[p].tolist()
a : Dict = p
# Filter padding out:
a : List[Any] = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
a : List[Any] = self.tokenizer.decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase)
a : Dict = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
row.append(__UpperCAmelCase)
result.append(__UpperCAmelCase)
if single_mask:
return result[0]
return result
def __snake_case ( self : str , __UpperCAmelCase : Dict , __UpperCAmelCase : Any=None):
if isinstance(__UpperCAmelCase , __UpperCAmelCase):
a : Optional[int] = [targets]
try:
a : Union[str, Any] = self.tokenizer.get_vocab()
except Exception:
a : Any = {}
a : str = []
for target in targets:
a : str = vocab.get(__UpperCAmelCase , __UpperCAmelCase)
if id_ is None:
a : Any = self.tokenizer(
__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , max_length=1 , truncation=__UpperCAmelCase , )["input_ids"]
if len(__UpperCAmelCase) == 0:
logger.warning(
f'''The specified target token `{target}` does not exist in the model vocabulary. '''
"We cannot replace it with anything meaningful, ignoring it")
continue
a : Tuple = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
f'''The specified target token `{target}` does not exist in the model vocabulary. '''
f'''Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`.''')
target_ids.append(id_)
a : Union[str, Any] = list(set(__UpperCAmelCase))
if len(__UpperCAmelCase) == 0:
raise ValueError("At least one target must be provided when passed.")
a : Union[str, Any] = np.array(__UpperCAmelCase)
return target_ids
def __snake_case ( self : List[str] , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : int=None):
a : int = {}
if targets is not None:
a : List[Any] = self.get_target_ids(__UpperCAmelCase , __UpperCAmelCase)
a : Optional[int] = target_ids
if top_k is not None:
a : Any = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"fill-mask" , self.model.base_model_prefix , "The tokenizer does not define a `mask_token`.")
return {}, {}, postprocess_params
def __call__( self : Optional[int] , __UpperCAmelCase : int , *__UpperCAmelCase : Dict , **__UpperCAmelCase : int):
a : str = super().__call__(__UpperCAmelCase , **__UpperCAmelCase)
if isinstance(__UpperCAmelCase , __UpperCAmelCase) and len(__UpperCAmelCase) == 1:
return outputs[0]
return outputs
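# End-to-end usage of a fill-mask pipeline like the class above, via the
# standard transformers API (comment sketch, since running it downloads a model):
#
#     from transformers import pipeline
#     unmasker = pipeline("fill-mask", model="distilroberta-base")
#     unmasker("Paris is the <mask> of France.", top_k=2)
#     # -> list of dicts with the keys built above: score, token, token_str, sequence
#     unmasker("The <mask> is red.", targets=[" apple", " sky"])  # restrict candidates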
| 135 | 0 |
def __lowerCamelCase ( lowerCamelCase__ : int = 10 , lowerCamelCase__ : int = 1000 , lowerCamelCase__ : bool = True ):
'''simple docstring'''
assert (
isinstance(__lowerCamelCase , __lowerCamelCase )
and isinstance(__lowerCamelCase , __lowerCamelCase )
and isinstance(__lowerCamelCase , __lowerCamelCase )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError("""Invalid value for min_val or max_val (min_value < max_value)""" )
return min_val if option else max_val
def __lowerCamelCase ( lowerCamelCase__ : int , lowerCamelCase__ : int ):
'''simple docstring'''
return int((number_a + number_a) / 2 )
def __lowerCamelCase ( lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : int ):
'''simple docstring'''
assert (
isinstance(__lowerCamelCase , __lowerCamelCase ) and isinstance(__lowerCamelCase , __lowerCamelCase ) and isinstance(__lowerCamelCase , __lowerCamelCase )
), 'argument values must be type of "int"'
if lower > higher:
raise ValueError("""argument value for lower and higher must be(lower > higher)""" )
if not lower < to_guess < higher:
raise ValueError(
"""guess value must be within the range of lower and higher value""" )
def answer(lowerCamelCase__ : int ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print("""started...""" )
lowerCamelCase = lower
lowerCamelCase = higher
lowerCamelCase = []
while True:
lowerCamelCase = get_avg(__lowerCamelCase , __lowerCamelCase )
last_numbers.append(__lowerCamelCase )
if answer(__lowerCamelCase ) == "low":
lowerCamelCase = number
elif answer(__lowerCamelCase ) == "high":
lowerCamelCase = number
else:
break
print(f'guess the number : {last_numbers[-1]}' )
print(f'details : {last_numbers!s}' )
def __lowerCamelCase ( ):
'''simple docstring'''
lowerCamelCase = int(input("""Enter lower value : """ ).strip() )
lowerCamelCase = int(input("""Enter high value : """ ).strip() )
lowerCamelCase = int(input("""Enter value to guess : """ ).strip() )
guess_the_number(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
main()
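# The game above is binary search in disguise, so it needs at most about
# log2(higher - lower) guesses. A compact equivalent (assumes
# lower < target < higher, exactly as the original validates):
def _guess(lower: int, higher: int, target: int) -> list[int]:
    guesses = []
    while True:
        mid = (lower + higher) // 2
        guesses.append(mid)
        if mid < target:
            lower = mid
        elif mid > target:
            higher = mid
        else:
            return guesses

assert _guess(0, 1000, 17)[-1] == 17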
| 457 |
'''simple docstring'''
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __magic_name__ ( lowerCAmelCase ):
def __init__( self , *snake_case , snake_case=None , snake_case=None , **snake_case) -> Tuple:
'''simple docstring'''
super().__init__(*snake_case , **snake_case)
_UpperCAmelCase : Any =eval_examples
_UpperCAmelCase : Optional[int] =post_process_function
def lowerCAmelCase ( self , snake_case = None , snake_case=None , snake_case = None , snake_case = "eval" , **snake_case , ) -> Dict[str, float]:
'''simple docstring'''
_UpperCAmelCase : Tuple =gen_kwargs.copy()
_UpperCAmelCase : int =(
gen_kwargs['max_length'] if gen_kwargs.get('max_length') is not None else self.args.generation_max_length
)
_UpperCAmelCase : Dict =(
gen_kwargs['num_beams'] if gen_kwargs.get('num_beams') is not None else self.args.generation_num_beams
)
_UpperCAmelCase : Dict =gen_kwargs
_UpperCAmelCase : Tuple =self.eval_dataset if eval_dataset is None else eval_dataset
_UpperCAmelCase : Union[str, Any] =self.get_eval_dataloader(snake_case)
_UpperCAmelCase : Tuple =self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
_UpperCAmelCase : Dict =self.compute_metrics
_UpperCAmelCase : Optional[Any] =None
_UpperCAmelCase : str =time.time()
_UpperCAmelCase : int =self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_UpperCAmelCase : str =eval_loop(
snake_case , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=snake_case , metric_key_prefix=snake_case , )
finally:
_UpperCAmelCase : str =compute_metrics
_UpperCAmelCase : Optional[int] =self.args.eval_batch_size * self.args.world_size
if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
snake_case , snake_case , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
_UpperCAmelCase : Tuple =self.post_process_function(snake_case , snake_case , snake_case)
_UpperCAmelCase : Union[str, Any] =self.compute_metrics(snake_case)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
_UpperCAmelCase : Any =metrics.pop(snake_case)
metrics.update(output.metrics)
else:
_UpperCAmelCase : str =output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(snake_case)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
_UpperCAmelCase : Any =self.callback_handler.on_evaluate(self.args , self.state , self.control , snake_case)
return metrics
def lowerCAmelCase ( self , snake_case , snake_case , snake_case=None , snake_case = "test" , **snake_case) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[int] =gen_kwargs.copy()
_UpperCAmelCase : int =self.get_test_dataloader(snake_case)
# Temporarily disable metric computation, we will do it in the loop here.
_UpperCAmelCase : str =self.compute_metrics
_UpperCAmelCase : Union[str, Any] =None
_UpperCAmelCase : Union[str, Any] =time.time()
_UpperCAmelCase : Tuple =self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_UpperCAmelCase : Any =eval_loop(
snake_case , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=snake_case , metric_key_prefix=snake_case , )
finally:
_UpperCAmelCase : Any =compute_metrics
_UpperCAmelCase : Dict =self.args.eval_batch_size * self.args.world_size
if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
snake_case , snake_case , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is None or self.compute_metrics is None:
return output
_UpperCAmelCase : Tuple =self.post_process_function(snake_case , snake_case , snake_case , 'predict')
_UpperCAmelCase : Union[str, Any] =self.compute_metrics(snake_case)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
_UpperCAmelCase : Any =metrics.pop(snake_case)
metrics.update(output.metrics)
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=snake_case)
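# The speed_metrics(...) calls above reduce to throughput arithmetic over the
# measured wall time. Illustrative numbers (toy values, not real benchmarks):
_runtime = 12.5                # seconds spent in the eval loop
_num_samples = 1_000           # examples evaluated
assert _num_samples / _runtime == 80.0  # reported as eval_samples_per_second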
| 446 | 0 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def _lowercase ( a__ : Dict , a__ : int ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = torch.load(a__ , map_location="cpu" )
_UpperCamelCase = chkpt["model"]
# We have the base model one level deeper than the original XLM repository
_UpperCamelCase = {}
for k, v in state_dict.items():
if "pred_layer" in k:
_UpperCamelCase = v
else:
_UpperCamelCase = v
_UpperCamelCase = chkpt["params"]
_UpperCamelCase = {n: v for n, v in config.items() if not isinstance(a__ , (torch.FloatTensor, numpy.ndarray) )}
_UpperCamelCase = chkpt["dico_word2id"]
_UpperCamelCase = {s + "</w>" if s.find("@@" ) == -1 and i > 13 else s.replace("@@" , "" ): i for s, i in vocab.items()}
# Save pytorch-model
_UpperCamelCase = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
_UpperCamelCase = pytorch_dump_folder_path + "/" + CONFIG_NAME
_UpperCamelCase = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]
print(f'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(a__ , a__ )
print(f'''Save configuration file to {pytorch_config_dump_path}''' )
with open(a__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(a__ , indent=2 ) + "\n" )
print(f'''Save vocab file to {pytorch_config_dump_path}''' )
with open(a__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(a__ , indent=2 ) + "\n" )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xlm_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__lowerCAmelCase = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
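# The vocab rewrite above adapts fastBPE conventions for XLMTokenizer:
# word-final tokens gain "</w>" and "@@" continuation markers are stripped,
# while indices 0-13 (reserved specials) pass through unchanged. A toy
# illustration of that dict comprehension:
_raw = {"<s>": 0, "hel@@": 20, "lo": 21}
_converted = {
    s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i
    for s, i in _raw.items()
}
assert _converted == {"<s>": 0, "hel": 20, "lo</w>": 21}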
| 714 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowerCamelCase_ ( lowercase , lowercase ):
@register_to_config
def __init__( self , *,
lowerCamelCase_ = 4 , lowerCamelCase_ = 7_68 , lowerCamelCase_ , lowerCamelCase_ , ) -> int:
"""simple docstring"""
super().__init__()
_UpperCamelCase = nn.Parameter(torch.zeros(lowerCamelCase_ ) )
# parameters for additional clip time embeddings
_UpperCamelCase = nn.Linear(lowerCamelCase_ , lowerCamelCase_ )
_UpperCamelCase = nn.Linear(lowerCamelCase_ , lowerCamelCase_ )
# parameters for encoder hidden states
_UpperCamelCase = clip_extra_context_tokens
_UpperCamelCase = nn.Linear(
lowerCamelCase_ , self.clip_extra_context_tokens * cross_attention_dim )
_UpperCamelCase = nn.Linear(lowerCamelCase_ , lowerCamelCase_ )
_UpperCamelCase = nn.LayerNorm(lowerCamelCase_ )
def lowercase ( self , *, lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> int:
"""simple docstring"""
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
_UpperCamelCase = image_embeddings.shape[0]
_UpperCamelCase = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
_UpperCamelCase = classifier_free_guidance_embeddings.expand(
lowerCamelCase_ , -1 )
_UpperCamelCase = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
_UpperCamelCase = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
_UpperCamelCase = self.embedding_proj(lowerCamelCase_ )
_UpperCamelCase = self.clip_image_embeddings_project_to_time_embeddings(lowerCamelCase_ )
_UpperCamelCase = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
_UpperCamelCase = self.clip_extra_context_tokens_proj(lowerCamelCase_ )
_UpperCamelCase = clip_extra_context_tokens.reshape(lowerCamelCase_ , -1 , self.clip_extra_context_tokens )
_UpperCamelCase = clip_extra_context_tokens.permute(0 , 2 , 1 )
_UpperCamelCase = self.encoder_hidden_states_proj(lowerCamelCase_ )
_UpperCamelCase = self.text_encoder_hidden_states_norm(lowerCamelCase_ )
_UpperCamelCase = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
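# Shape walk-through for the extra context tokens built above (toy sizes:
# batch=2, cross_attention_dim=768, 4 extra tokens; random tensors only):
import torch

_batch, _dim, _extra = 2, 768, 4
_proj = torch.randn(_batch, _extra * _dim)  # output of the extra-tokens projection
_tok = _proj.reshape(_batch, -1, _extra)    # (2, 768, 4), as in the module
_tok = _tok.permute(0, 2, 1)                # (2, 4, 768): one row per extra token
assert _tok.shape == (_batch, _extra, _dim)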
| 589 | 0 |
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f"{price_plus_tax(1_00, 0.25) = }")
print(f"{price_plus_tax(125.50, 0.05) = }")
| 0 |
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
A : Dict = threading.Lock()
A : Optional[logging.Handler] = None
A : Optional[int] = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
A : List[str] = logging.WARNING
A : str = True
def a__ ( ):
SCREAMING_SNAKE_CASE_ = os.getenv("TRANSFORMERS_VERBOSITY" , __UpperCamelCase )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F'''Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '''
F'''has to be one of: { ", ".join(log_levels.keys() ) }''' )
return _default_log_level
def a__ ( ):
return __name__.split("." )[0]
def a__ ( ):
return logging.getLogger(_get_library_name() )
def a__ ( ):
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
SCREAMING_SNAKE_CASE_ = logging.StreamHandler() # Set sys.stderr as stream.
SCREAMING_SNAKE_CASE_ = sys.stderr.flush
# Apply our default configuration to the library root logger.
SCREAMING_SNAKE_CASE_ = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
SCREAMING_SNAKE_CASE_ = False
def a__ ( ):
global _default_handler
with _lock:
if not _default_handler:
return
SCREAMING_SNAKE_CASE_ = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
SCREAMING_SNAKE_CASE_ = None
def a__ ( ):
return log_levels
def a__ ( __UpperCamelCase = None ):
if name is None:
SCREAMING_SNAKE_CASE_ = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(__UpperCamelCase )
def a__ ( ):
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def a__ ( __UpperCamelCase ):
_configure_library_root_logger()
_get_library_root_logger().setLevel(__UpperCamelCase )
def a__ ( ):
return set_verbosity(__UpperCamelCase )
def a__ ( ):
return set_verbosity(__UpperCamelCase )
def a__ ( ):
return set_verbosity(__UpperCamelCase )
def a__ ( ):
return set_verbosity(__UpperCamelCase )
def a__ ( ):
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def a__ ( ):
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def a__ ( __UpperCamelCase ):
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(__UpperCamelCase )
def a__ ( __UpperCamelCase ):
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(__UpperCamelCase )
def a__ ( ):
_configure_library_root_logger()
SCREAMING_SNAKE_CASE_ = False
def a__ ( ):
_configure_library_root_logger()
SCREAMING_SNAKE_CASE_ = True
def a__ ( ):
SCREAMING_SNAKE_CASE_ = _get_library_root_logger().handlers
for handler in handlers:
SCREAMING_SNAKE_CASE_ = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s" )
handler.setFormatter(__UpperCamelCase )
def a__ ( ):
SCREAMING_SNAKE_CASE_ = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(__UpperCamelCase )
def a__ ( self , *__UpperCamelCase , **__UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS" , __UpperCamelCase )
if no_advisory_warnings:
return
self.warning(*__UpperCamelCase , **__UpperCamelCase )
A : Dict = warning_advice
@functools.lru_cache(__UpperCamelCase )
def a__ ( self , *__UpperCamelCase , **__UpperCamelCase ):
self.warning(*__UpperCamelCase , **__UpperCamelCase )
A : Union[str, Any] = warning_once
class lowerCamelCase :
"""simple docstring"""
def __init__( self : Optional[Any] , *__magic_name__ : Optional[int] , **__magic_name__ : int ) -> int: # pylint: disable=unused-argument
SCREAMING_SNAKE_CASE_ = args[0] if args else None
def __iter__( self : str ) -> Optional[int]:
return iter(self._iterator )
def __getattr__( self : Dict , __magic_name__ : int ) -> Optional[int]:
def empty_fn(*__magic_name__ : Tuple , **__magic_name__ : Optional[Any] ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : Any ) -> Dict:
return self
def __exit__( self : Union[str, Any] , __magic_name__ : Any , __magic_name__ : List[str] , __magic_name__ : List[str] ) -> List[str]:
return
class lowerCamelCase :
"""simple docstring"""
def __call__( self : List[Any] , *__magic_name__ : List[str] , **__magic_name__ : Tuple ) -> Union[str, Any]:
if _tqdm_active:
return tqdm_lib.tqdm(*__magic_name__ , **__magic_name__ )
else:
return EmptyTqdm(*__magic_name__ , **__magic_name__ )
def __A ( self : Tuple , *__magic_name__ : List[Any] , **__magic_name__ : List[str] ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*__magic_name__ , **__magic_name__ )
def __A ( self : Optional[Any] ) -> Dict:
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
A : Optional[int] = _tqdm_cls()
def a__ ( ):
global _tqdm_active
return bool(_tqdm_active )
def a__ ( ):
global _tqdm_active
SCREAMING_SNAKE_CASE_ = True
hf_hub_utils.enable_progress_bars()
def a__ ( ):
global _tqdm_active
SCREAMING_SNAKE_CASE_ = False
hf_hub_utils.disable_progress_bars()
| 140 | 0 |
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase_ ( snake_case_ : int | float | str , snake_case_ : int | float | str ) -> list[str]:
'''simple docstring'''
if nth_term == "":
return [""]
__lowerCAmelCase = int(snake_case_ )
__lowerCAmelCase = int(snake_case_ )
__lowerCAmelCase = []
for temp in range(int(snake_case_ ) ):
series.append(f"""1 / {pow(temp + 1 , int(snake_case_ ) )}""" if series else """1""" )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
_A : int = int(input('''Enter the last number (nth term) of the P-Series'''))
_A : Any = int(input('''Enter the power for P-Series'''))
print('''Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p''')
print(p_series(nth_term, power))
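# Numeric companion to the term strings above: the p-series sum(1/n**p)
# converges only for p > 1, while p = 1 gives the divergent harmonic series:
_partial = lambda p, n: sum(1 / k**p for k in range(1, n + 1))
assert _partial(2, 10_000) < 1.6449341  # approaches pi**2 / 6 = 1.644934...
assert _partial(1, 10_000) > 9          # harmonic sum keeps growing (~ ln n)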
| 330 |
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_A : Union[str, Any] = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
_A : Dict = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
_A : Any = '''
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the canidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
_A : int = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
_A : List[str] = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
'''simple docstring'''
def a ( self : Optional[int] ) -> Optional[Any]:
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , )
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any]=[1, 10, 1_00] , SCREAMING_SNAKE_CASE__ : Tuple=4 , SCREAMING_SNAKE_CASE__ : Optional[Any]=3.0 ) -> Optional[int]:
if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError("""This metric is currently not supported on Windows.""" )
with ThreadPoolExecutor(max_workers=SCREAMING_SNAKE_CASE__ ) as executor:
__lowerCAmelCase = []
__lowerCAmelCase = Counter()
__lowerCAmelCase = 0
__lowerCAmelCase = defaultdict(SCREAMING_SNAKE_CASE__ )
for task_id, (candidates, test_case) in enumerate(zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ):
for candidate in candidates:
__lowerCAmelCase = candidate + """\n""" + test_case
__lowerCAmelCase = (test_program, timeout, task_id, completion_id[task_id])
__lowerCAmelCase = executor.submit(SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ )
futures.append(SCREAMING_SNAKE_CASE__ )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(SCREAMING_SNAKE_CASE__ ):
__lowerCAmelCase = future.result()
results[result["task_id"]].append((result["""completion_id"""], result) )
__lowerCAmelCase , __lowerCAmelCase = [], []
for result in results.values():
result.sort()
__lowerCAmelCase = [r[1]["""passed"""] for r in result]
total.append(len(SCREAMING_SNAKE_CASE__ ) )
correct.append(sum(SCREAMING_SNAKE_CASE__ ) )
__lowerCAmelCase = np.array(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = np.array(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = k
__lowerCAmelCase = {f"""pass@{k}""": estimate_pass_at_k(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def UpperCamelCase_ ( snake_case_ : List[Any] , snake_case_ : Tuple , snake_case_ : int ) -> Dict:
'''simple docstring'''
def estimator(snake_case_ : int , snake_case_ : int , snake_case_ : int ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(snake_case_ , snake_case_ ):
__lowerCAmelCase = itertools.repeat(snake_case_ , len(snake_case_ ) )
else:
assert len(snake_case_ ) == len(snake_case_ )
__lowerCAmelCase = iter(snake_case_ )
return np.array([estimator(int(snake_case_ ) , int(snake_case_ ) , snake_case_ ) for n, c in zip(snake_case_ , snake_case_ )] )
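# Worked example of the unbiased pass@k estimator above,
# pass@k = 1 - C(n - c, k) / C(n, k) for n samples of which c pass.
# With n=5 candidates, c=2 correct and k=2: 1 - C(3,2)/C(5,2) = 1 - 3/10 = 0.7.
import numpy as np

def _pass_at_k(n: int, c: int, k: int) -> float:
    if n - c < k:
        return 1.0  # every size-k draw must contain a passing sample
    return 1.0 - float(np.prod(1.0 - k / np.arange(n - c + 1, n + 1)))

assert abs(_pass_at_k(5, 2, 2) - 0.7) < 1e-12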
| 330 | 1 |
from __future__ import annotations
import numpy as np
def snake_case ( lowerCamelCase ):
'''simple docstring'''
return np.maximum(0 , lowerCamelCase )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 80 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_SCREAMING_SNAKE_CASE : Union[str, Any] = {'tokenization_tapex': ['TapexTokenizer']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
_SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 226 | 0 |
'''simple docstring'''
def A_ ( __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int:
"""simple docstring"""
__A : int = [1]
__A , __A , __A : Tuple = 0, 0, 0
__A : Dict = ugly_nums[ia] * 2
__A : Tuple = ugly_nums[ia] * 3
__A : List[str] = ugly_nums[ia] * 5
for _ in range(1 , __UpperCamelCase ):
__A : List[str] = min(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
ugly_nums.append(__UpperCamelCase )
if next_num == next_a:
ia += 1
__A : Optional[int] = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
__A : Union[str, Any] = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
__A : List[Any] = ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F"{ugly_numbers(200) = }")
| 713 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
A__ : Union[str, Any] =logging.get_logger(__name__)
A__ : List[str] ={'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
A__ : str ={
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
A__ : List[Any] ={
'allenai/led-base-16384': 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def A_ ( ) -> Tuple:
"""simple docstring"""
__A : Optional[int] = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
__A : List[Any] = bs[:]
__A : Union[str, Any] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__SCREAMING_SNAKE_CASE )
cs.append(2**8 + n )
n += 1
__A : int = [chr(__SCREAMING_SNAKE_CASE ) for n in cs]
return dict(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
def A_ ( __SCREAMING_SNAKE_CASE : str ) -> Tuple:
"""simple docstring"""
__A : str = set()
__A : int = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__A : Optional[Any] = char
return pairs
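# A minimal demonstration of how pair extraction drives BPE: repeatedly merge
# the best-ranked adjacent pair until no remaining pair is in the merge table.
# Toy merge ranks, not the real LED/RoBERTa merges; a local readable copy of
# the pair helper is used since the names above are obfuscated.
def _get_pairs_demo(word: tuple) -> set:
    return {(word[i], word[i + 1]) for i in range(len(word) - 1)}

def _bpe_demo(word: tuple, ranks: dict) -> tuple:
    pairs = _get_pairs_demo(word)
    while pairs:
        best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
        if best not in ranks:
            break  # nothing mergeable is left
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == best:
                merged.append(word[i] + word[i + 1])
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
        pairs = _get_pairs_demo(word)
    return word

assert _bpe_demo(tuple("hello"), {("l", "l"): 0, ("h", "e"): 1}) == ("he", "ll", "o")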
class __A ( _SCREAMING_SNAKE_CASE ):
lowerCamelCase =VOCAB_FILES_NAMES
lowerCamelCase =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase =['''input_ids''', '''attention_mask''']
def __init__( self : Any , lowerCamelCase : Dict , lowerCamelCase : List[str] , lowerCamelCase : Dict="replace" , lowerCamelCase : int="<s>" , lowerCamelCase : str="</s>" , lowerCamelCase : List[str]="</s>" , lowerCamelCase : Optional[Any]="<s>" , lowerCamelCase : Any="<unk>" , lowerCamelCase : str="<pad>" , lowerCamelCase : Any="<mask>" , lowerCamelCase : Optional[int]=False , **lowerCamelCase : str , ):
"""simple docstring"""
__A : List[Any] = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else bos_token
__A : Any = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else eos_token
__A : int = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else sep_token
__A : Any = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else cls_token
__A : List[Any] = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else unk_token
__A : List[Any] = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__A : List[str] = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token
super().__init__(
errors=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , add_prefix_space=lowerCamelCase , **lowerCamelCase , )
with open(lowerCamelCase , encoding="""utf-8""" ) as vocab_handle:
__A : Optional[Any] = json.load(lowerCamelCase )
__A : Dict = {v: k for k, v in self.encoder.items()}
__A : List[Any] = errors # how to handle errors in decoding
__A : Optional[int] = bytes_to_unicode()
__A : str = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase , encoding="""utf-8""" ) as merges_handle:
__A : str = merges_handle.read().split("""\n""" )[1:-1]
__A : Tuple = [tuple(merge.split() ) for merge in bpe_merges]
__A : List[Any] = dict(zip(lowerCamelCase , range(len(lowerCamelCase ) ) ) )
__A : List[str] = {}
__A : List[str] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__A : Any = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def lowercase_( self : Any ):
"""simple docstring"""
return len(self.encoder )
def lowercase_( self : Optional[int] ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase_( self : str , lowerCamelCase : Optional[int] ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
__A : Optional[Any] = tuple(lowerCamelCase )
__A : Dict = get_pairs(lowerCamelCase )
if not pairs:
return token
while True:
__A : str = min(lowerCamelCase , key=lambda lowerCamelCase : self.bpe_ranks.get(lowerCamelCase , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
__A , __A : Optional[Any] = bigram
__A : List[Any] = []
__A : Union[str, Any] = 0
while i < len(lowerCamelCase ):
try:
__A : str = word.index(lowerCamelCase , lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__A : Union[str, Any] = j
if word[i] == first and i < len(lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__A : Dict = tuple(lowerCamelCase )
__A : List[Any] = new_word
if len(lowerCamelCase ) == 1:
break
else:
__A : Optional[Any] = get_pairs(lowerCamelCase )
__A : Optional[int] = """ """.join(lowerCamelCase )
__A : str = word
return word
def lowercase_( self : str , lowerCamelCase : Optional[int] ):
"""simple docstring"""
__A : List[str] = []
for token in re.findall(self.pat , lowerCamelCase ):
__A : Optional[Any] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase ).split(""" """ ) )
return bpe_tokens
def lowercase_( self : Dict , lowerCamelCase : Tuple ):
"""simple docstring"""
return self.encoder.get(lowerCamelCase , self.encoder.get(self.unk_token ) )
def lowercase_( self : str , lowerCamelCase : Optional[Any] ):
"""simple docstring"""
return self.decoder.get(lowerCamelCase )
def lowercase_( self : Tuple , lowerCamelCase : int ):
"""simple docstring"""
__A : Any = """""".join(lowerCamelCase )
__A : int = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def lowercase_( self : List[str] , lowerCamelCase : str , lowerCamelCase : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(lowerCamelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
__A : Any = os.path.join(
lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
__A : Optional[Any] = os.path.join(
lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase , ensure_ascii=lowerCamelCase ) + """\n""" )
__A : str = 0
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
""" Please check that the tokenizer is not corrupted!""" )
__A : Any = token_index
writer.write(""" """.join(lowerCamelCase ) + """\n""" )
index += 1
return vocab_file, merge_file
def lowercase_( self : str , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__A : Tuple = [self.cls_token_id]
__A : str = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase_( self : List[str] , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None , lowerCamelCase : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase )) + [1]
return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1]
def lowercase_( self : List[str] , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ):
"""simple docstring"""
__A : int = [self.sep_token_id]
__A : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase_( self : Dict , lowerCamelCase : int , lowerCamelCase : Dict=False , **lowerCamelCase : List[Any] ):
"""simple docstring"""
__A : Union[str, Any] = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase ) > 0 and not text[0].isspace()):
__A : Dict = """ """ + text
return (text, kwargs)
def lowercase_( self : Optional[int] , lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase : Optional[int] = None , lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[bool] = None , ):
"""simple docstring"""
__A : str = super()._pad(
encoded_inputs=lowerCamelCase , max_length=lowerCamelCase , padding_strategy=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , )
# Load from model defaults
if return_attention_mask is None:
__A : List[Any] = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__A : Tuple = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
__A : List[Any] = len(encoded_inputs["""global_attention_mask"""] ) != len(lowerCamelCase )
if needs_to_be_padded:
__A : str = len(lowerCamelCase ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__A : List[Any] = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
__A : Any = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
| 499 | 0 |
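The LED tokenizer above drives a standard byte-level BPE loop. For reference, here is a compact sketch of the merge pass that its `bpe` method implements (the rank table below is a toy example, not the real merges file):

```python
# Compact sketch of one BPE merge loop: repeatedly fuse the adjacent pair
# with the lowest merge rank until no known pair remains.
def bpe(token: str, bpe_ranks: dict) -> str:
    word = tuple(token)
    while len(word) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        bigram = min(pairs, key=lambda kv: bpe_ranks.get(kv, float("inf")))
        if bigram not in bpe_ranks:
            break  # nothing left to merge
        first, second = bigram
        new_word, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == bigram:
                new_word.append(first + second)
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
    return " ".join(word)


ranks = {("l", "o"): 0, ("lo", "w"): 1}  # toy merge table
print(bpe("low", ranks))  # low
print(bpe("owl", ranks))  # o w l
```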
"""simple docstring"""
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
__magic_name__ : Optional[Any] = logging.getLogger()
def get_results( output_dir :List[str] ):
    results = {}
    path = os.path.join(output_dir, """all_results.json""" )
    if os.path.exists(path ):
        with open(path, """r""" ) as f:
            results = json.load(f )
    else:
        raise ValueError(f'can\'t find {path}' )
    return results
__magic_name__ : Optional[Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class __snake_case (lowerCamelCase ):
def __a ( self: int ):
import xla_spawn
__lowerCamelCase = self.get_auto_remove_tmp_dir()
__lowerCamelCase = f'\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split()
with patch.object(A_ , """argv""" , A_ ):
__lowerCamelCase = time()
xla_spawn.main()
__lowerCamelCase = time()
__lowerCamelCase = get_results(A_ )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 5_00 )
def __a ( self: Optional[Any] ):
import xla_spawn
__lowerCamelCase = """
./tests/test_trainer_tpu.py
--num_cores=8
./tests/test_trainer_tpu.py
""".split()
with patch.object(A_ , """argv""" , A_ ):
xla_spawn.main()
| 281 |
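The TPU test above launches a training script by patching `sys.argv` before calling its `main()`. The pattern in isolation (the `main` below is a stand-in, not the real script):

```python
import sys
from unittest.mock import patch


def main():
    # stand-in for xla_spawn.main() / run_glue.py
    print("args:", sys.argv[1:])


testargs = ["run_glue.py", "--max_steps=10", "--seed=42"]
with patch.object(sys, "argv", testargs):
    main()  # prints: args: ['--max_steps=10', '--seed=42']
```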
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ : str = logging.get_logger(__name__)
__magic_name__ : List[Any] = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class __snake_case (lowerCamelCase ):
__a = '''visual_bert'''
def __init__( self: Any , A_: List[str]=3_05_22 , A_: Tuple=7_68 , A_: Dict=5_12 , A_: List[Any]=12 , A_: str=12 , A_: Dict=30_72 , A_: str="gelu" , A_: Union[str, Any]=0.1 , A_: Any=0.1 , A_: List[Any]=5_12 , A_: int=2 , A_: int=0.02 , A_: Any=1E-12 , A_: List[str]=False , A_: str=True , A_: Union[str, Any]=1 , A_: Optional[int]=0 , A_: Dict=2 , **A_: Optional[int] , ):
super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ )
__lowerCamelCase = vocab_size
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = hidden_size
__lowerCamelCase = visual_embedding_dim
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = initializer_range
__lowerCamelCase = type_vocab_size
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = bypass_transformer
__lowerCamelCase = special_visual_initialize
| 281 | 1 |
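Assuming a transformers installation that ships this config class, it is used like any other `PretrainedConfig`; the defaults mirror the `__init__` signature above:

```python
from transformers import VisualBertConfig

# hidden_size and visual_embedding_dim correspond to the 768/512 defaults above
config = VisualBertConfig(hidden_size=768, visual_embedding_dim=512)
print(config.model_type)            # visual_bert
print(config.visual_embedding_dim)  # 512
```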
'''simple docstring'''
def is_isogram( string : str ) -> bool:
    """Return True when no letter occurs more than once, case-insensitively."""
    if not all(x.isalpha() for x in string ):
        raise ValueError('String must only contain alphabetic characters.' )
    letters = sorted(string.lower() )
    return len(letters ) == len(set(letters ) )


if __name__ == "__main__":
    input_str = input('Enter a string ').strip()
    isogram = is_isogram(input_str)
    print(f"""{input_str} is {'an' if isogram else 'not an'} isogram.""")
| 646 |
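Assuming the helper above is importable as `is_isogram`, a few quick checks:

```python
assert is_isogram("Uncopyrightable")   # every letter occurs once
assert not is_isogram("allowance")     # 'a' and 'l' repeat

try:
    is_isogram("hello world")          # space is not alphabetic
except ValueError as err:
    print(err)  # String must only contain alphabetic characters.
```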
'''simple docstring'''
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum( number : int ) -> int:
    """Sum of the fifth powers of the decimal digits of ``number``."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number ) )


def solution() -> int:
    """Sum all numbers that equal the sum of the fifth powers of their own digits."""
    return sum(
        number
        for number in range(10_00 , 1_00_00_00 )
        if number == digits_fifth_powers_sum(number ) )
if __name__ == "__main__":
print(solution())
| 646 | 1 |
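A quick sanity check for the digit-fifth-powers snippet above: 4150 is one of the numbers that equals the sum of the fifth powers of its own digits.

```python
powers = {str(d): d**5 for d in range(10)}
n = 4150  # 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0
assert n == sum(powers[d] for d in str(n))
```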
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''spiece.model'''}
SCREAMING_SNAKE_CASE__ = {
'''vocab_file''': {
'''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''',
}
}
SCREAMING_SNAKE_CASE__ = {
'''AI-Sweden/gpt-sw3-126m''': 2048,
'''AI-Sweden/gpt-sw3-350m''': 2048,
'''AI-Sweden/gpt-sw3-1.6b''': 2048,
'''AI-Sweden/gpt-sw3-6.7b''': 2048,
'''AI-Sweden/gpt-sw3-20b''': 2048,
}
class _UpperCamelCase( __lowerCamelCase ):
__SCREAMING_SNAKE_CASE : Any = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : Any = ['''input_ids''', '''attention_mask''']
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Optional[int]=False , SCREAMING_SNAKE_CASE__ : Optional[int]=False , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE__ : List[Any] , ):
'''simple docstring'''
__a : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
__a : List[Any] = kwargs.get('name_or_path' )
if name_or_path is None:
logger.warning(
                'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b;'
                ' if you are testing the model, this can safely be ignored' )
__a : str = 'None'
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
__a : List[Any] = '<|endoftext|>' if eos_token is None else eos_token
__a : List[str] = '<unk>' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
__a : str = unk_token if pad_token is None else pad_token
__a : List[Any] = eos_token if bos_token is None else bos_token
else:
__a : Optional[Any] = '<pad>' if pad_token is None else pad_token
__a : Tuple = '<s>' if bos_token is None else bos_token
super().__init__(
do_lower_case=SCREAMING_SNAKE_CASE__ , remove_space=SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , )
__a : str = do_lower_case
__a : Dict = remove_space
__a : Any = keep_accents
__a : Optional[Any] = vocab_file
__a : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(SCREAMING_SNAKE_CASE__ )
# Used for whitespace normalization in input texts
        # fmt: off
__a : int = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', ''}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
__a : List[Any] = re.compile(
f'''[{"".join(map(SCREAMING_SNAKE_CASE__ , list(range(0 , 9 ) ) + list(range(1_1 , 3_2 ) ) + list(range(1_2_7 , 1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]''' )
def __getstate__( self : Optional[int] ):
'''simple docstring'''
__a : List[Any] = self.__dict__.copy()
__a : Union[str, Any] = None
return state
def __setstate__( self : Any , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
__a : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__a : Dict = {}
__a : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def __lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return len(self.sp_model )
def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
__a : Optional[Any] = self.non_printing_characters_re.sub('' , SCREAMING_SNAKE_CASE__ )
# Normalize whitespaces
__a : int = ''.join([char if char not in self.whitespaces else ' ' for char in text] )
# NFC Unicode normalization
__a : Union[str, Any] = unicodedata.normalize('NFC' , SCREAMING_SNAKE_CASE__ )
return text
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
__a : Tuple = self.preprocess_text(SCREAMING_SNAKE_CASE__ )
return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
return self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE__ )
@staticmethod
    def __lowerCAmelCase ( out_string : str ):
'''simple docstring'''
return out_string
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] ):
'''simple docstring'''
__a : Any = []
__a : List[Any] = ''
__a : Optional[int] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ ) + token
__a : int = True
__a : Tuple = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE__ )
__a : str = False
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ )
return out_string
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
__a : Optional[Any] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__a : Tuple = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE__ , 'wb' ) as fi:
__a : Any = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, List[str]] , SCREAMING_SNAKE_CASE__ : Union[str, bool] = False ):
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__a : Tuple = self.preprocess_text(SCREAMING_SNAKE_CASE__ )
__a : Dict = self.sp_model.encode(SCREAMING_SNAKE_CASE__ )
else:
__a : Optional[Any] = [self.preprocess_text(SCREAMING_SNAKE_CASE__ ) for t in text]
__a : List[Any] = self.sp_model.encode(SCREAMING_SNAKE_CASE__ )
if return_tensors is True or return_tensors == "pt":
__a : Tuple = torch.tensor(SCREAMING_SNAKE_CASE__ )
return token_ids
def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : Union[int, List[int]] ):
'''simple docstring'''
return self.sp_model.decode(SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : "Conversation" ):
'''simple docstring'''
__a : int = [f'''User: {text}''' if is_user else f'''Bot: {text}''' for is_user, text in conversation.iter_texts()]
__a : int = (
f'''{self.eos_token}{self.bos_token}''' + f'''{self.bos_token}'''.join(SCREAMING_SNAKE_CASE__ ) + f'''{self.bos_token}Bot:'''
)
return self.encode(text=SCREAMING_SNAKE_CASE__ )
| 47 |
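The GPT-SW3 tokenizer above normalizes raw text before SentencePiece sees it. A standalone sketch of that preprocessing step (tab and newline are deliberately kept, matching the character ranges used above; the whitespace-variant mapping is omitted here):

```python
import re
import unicodedata

# Strip non-printing control characters; the ranges skip \t (9) and \n (10).
non_printing_re = re.compile(
    "["
    + "".join(map(chr, list(range(0, 9)) + list(range(11, 32))
                  + list(range(127, 160)) + [160, 173, 8203]))
    + "]"
)


def preprocess_text(text: str) -> str:
    text = non_printing_re.sub("", text)
    return unicodedata.normalize("NFC", text)


print(preprocess_text("Hej\x07 v\u00e4rlden\u200b"))  # Hej världen
```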
"""simple docstring"""
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase_ ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: List[str] = BioGptTokenizer
_lowerCamelCase: Tuple = False
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
A = dict(zip(A_ ,range(len(A_ ) ) ) )
A = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ) as fp:
fp.write(json.dumps(A_ ) )
with open(self.merges_file ,'w' ) as fp:
fp.write('\n'.join(A_ ) )
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Tuple ) -> int:
A = 'lower newer'
A = 'lower newer'
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
A = BioGptTokenizer(self.vocab_file ,self.merges_file )
A = 'lower'
A = ['low', 'er</w>']
A = tokenizer.tokenize(A_ )
self.assertListEqual(A_ ,A_ )
A = tokens + ['<unk>']
A = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) ,A_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
A = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
A = tokenizer.encode('sequence builders' ,add_special_tokens=A_ )
A = tokenizer.encode('multi-sequence build' ,add_special_tokens=A_ )
A = tokenizer.build_inputs_with_special_tokens(A_ )
A = tokenizer.build_inputs_with_special_tokens(A_ ,A_ )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a ) | 91 | 0 |
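The tokenizer test above builds its vocab/merges fixtures on disk before each case; the same pattern in isolation with stdlib temp files:

```python
import json
import os
import tempfile

vocab = {tok: i for i, tok in enumerate(["l", "o", "w", "lo", "low", "er</w>", "<unk>"])}
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

tmpdir = tempfile.mkdtemp()
with open(os.path.join(tmpdir, "vocab.json"), "w") as fp:
    json.dump(vocab, fp)
with open(os.path.join(tmpdir, "merges.txt"), "w") as fp:
    fp.write("\n".join(merges))

print(sorted(os.listdir(tmpdir)))  # ['merges.txt', 'vocab.json']
```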
import math
from collections.abc import Callable
def intersection( function : Callable[[float], float] , xa : float , xb : float ) -> float:
    """Find a real root of ``function`` with the secant method, starting from xa and xb."""
    x_n = xa
    x_n1 = xb
    while True:
        if x_n == x_n1 or function(x_n ) == function(x_n1 ):
            raise ZeroDivisionError('float division by zero, could not find root' )
        x_n2 = x_n1 - (
            function(x_n1 ) / ((function(x_n1 ) - function(x_n )) / (x_n1 - x_n ))
        )
        if abs(x_n2 - x_n1 ) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2
def f( x : float ) -> float:
    return math.pow(x , 3 ) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
| 682 |
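Assuming the snippet above is importable, the secant iteration finds the real root of x³ − 2x − 5 near 2.0946:

```python
root = intersection(f, 3, 3.5)
print(round(root, 4))       # 2.0946
assert abs(f(root)) < 1e-4  # residual at the returned root is tiny
```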
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=2 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ) -> Tuple:
__UpperCamelCase =parent
__UpperCamelCase =13
__UpperCamelCase =7
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =99
__UpperCamelCase =32
__UpperCamelCase =2
__UpperCamelCase =4
__UpperCamelCase =37
__UpperCamelCase ='gelu'
__UpperCamelCase =0.1
__UpperCamelCase =0.1
__UpperCamelCase =512
__UpperCamelCase =16
__UpperCamelCase =2
__UpperCamelCase =0.02
__UpperCamelCase =3
__UpperCamelCase =4
__UpperCamelCase =None
def _a ( self ) -> Tuple:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase =None
if self.use_input_mask:
__UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase =None
if self.use_token_type_ids:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCamelCase =None
__UpperCamelCase =None
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase =ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase =RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> str:
__UpperCamelCase =TFRoFormerModel(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase =[input_ids, input_mask]
__UpperCamelCase =model(A_ )
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> int:
__UpperCamelCase =True
__UpperCamelCase =TFRoFormerForCausalLM(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )['logits']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> str:
__UpperCamelCase =TFRoFormerForMaskedLM(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Dict:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFRoFormerForSequenceClassification(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> str:
__UpperCamelCase =self.num_choices
__UpperCamelCase =TFRoFormerForMultipleChoice(config=A_ )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase ={
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Optional[Any]:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFRoFormerForTokenClassification(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Union[str, Any]:
__UpperCamelCase =TFRoFormerForQuestionAnswering(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self ) -> Dict:
__UpperCamelCase =self.prepare_config_and_inputs()
(
(
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) ,
) =config_and_inputs
__UpperCamelCase ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Dict = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ : Tuple = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : Tuple = False
def _a ( self , A_ , A_ , A_ , A_ , A_ ) -> Optional[Any]:
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def _a ( self ) -> str:
__UpperCamelCase =TFRoFormerModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , hidden_size=37 )
def _a ( self ) -> Tuple:
self.config_tester.run_common_tests()
def _a ( self ) -> Any:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _a ( self ) -> Dict:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*A_ )
def _a ( self ) -> str:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A_ )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A_ )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A_ )
@slow
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base' )
self.assertIsNotNone(A_ )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self ) -> List[str]:
__UpperCamelCase =TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
__UpperCamelCase =tf.constant([[0, 1, 2, 3, 4, 5]] )
__UpperCamelCase =model(A_ )[0]
# TODO Replace vocab size
__UpperCamelCase =50000
__UpperCamelCase =[1, 6, vocab_size]
self.assertEqual(output.shape , A_ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
__UpperCamelCase =tf.constant(
[
[
[-0.1205_3341, -1.026_4901, 0.2922_1946],
[-1.513_3783, 0.19_7433, 0.1519_0607],
[-5.013_5403, -3.90_0256, -0.8403_8764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , A_ , atol=1E-4 )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : int = 1e-4
def _a ( self ) -> int:
__UpperCamelCase =tf.constant([[4, 10]] )
__UpperCamelCase =TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
__UpperCamelCase =emba(input_ids.shape )
__UpperCamelCase =tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
tf.debugging.assert_near(A_ , A_ , atol=self.tolerance )
def _a ( self ) -> int:
__UpperCamelCase =tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
__UpperCamelCase =TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
__UpperCamelCase =emba.weight[:3, :5]
tf.debugging.assert_near(A_ , A_ , atol=self.tolerance )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = 1e-4
def _a ( self ) -> List[Any]:
# 2,12,16,64
__UpperCamelCase =tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
__UpperCamelCase =-tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
__UpperCamelCase =TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
__UpperCamelCase =embed_positions([2, 16, 768] )[None, None, :, :]
__UpperCamelCase , __UpperCamelCase =TFRoFormerSelfAttention.apply_rotary_position_embeddings(
A_ , A_ , A_ )
__UpperCamelCase =tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
__UpperCamelCase =tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , A_ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , A_ , atol=self.tolerance )
| 682 | 1 |
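The sinusoidal/rotary tests above verify position-dependent rotations of query and key vectors. One common interleaved formulation of that trick, sketched in NumPy (the TF implementation above packs sin/cos slightly differently):

```python
import numpy as np


def apply_rotary(x, sin, cos):
    # rotate each (even, odd) feature pair by the position-dependent angle
    x1, x2 = x[..., 0::2], x[..., 1::2]
    rotated = np.stack([x1 * cos - x2 * sin, x2 * cos + x1 * sin], axis=-1)
    return rotated.reshape(x.shape)


seq_len, dim = 4, 8
positions = np.arange(seq_len)[:, None]
inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
sin, cos = np.sin(positions * inv_freq), np.cos(positions * inv_freq)

q = np.random.randn(seq_len, dim)
print(apply_rotary(q, sin, cos).shape)  # (4, 8)
```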
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class __lowercase ( a__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = ["image_processor", "tokenizer"]
SCREAMING_SNAKE_CASE = "BlipImageProcessor"
SCREAMING_SNAKE_CASE = "AutoTokenizer"
def __init__( self : Union[str, Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : str ):
"""simple docstring"""
super().__init__(__a , __a )
# add QFormer tokenizer
__A = qformer_tokenizer
def __call__( self : List[Any] , UpperCamelCase_ : ImageInput = None , UpperCamelCase_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCamelCase_ : bool = True , UpperCamelCase_ : Union[bool, str, PaddingStrategy] = False , UpperCamelCase_ : Union[bool, str, TruncationStrategy] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : int = 0 , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[bool] = None , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , **UpperCamelCase_ : List[str] , ):
"""simple docstring"""
if images is None and text is None:
raise ValueError("""You have to specify at least images or text.""" )
__A = BatchFeature()
if text is not None:
__A = self.tokenizer(
text=__a , add_special_tokens=__a , padding=__a , truncation=__a , max_length=__a , stride=__a , pad_to_multiple_of=__a , return_attention_mask=__a , return_overflowing_tokens=__a , return_special_tokens_mask=__a , return_offsets_mapping=__a , return_token_type_ids=__a , return_length=__a , verbose=__a , return_tensors=__a , **__a , )
encoding.update(__a )
__A = self.qformer_tokenizer(
text=__a , add_special_tokens=__a , padding=__a , truncation=__a , max_length=__a , stride=__a , pad_to_multiple_of=__a , return_attention_mask=__a , return_overflowing_tokens=__a , return_special_tokens_mask=__a , return_offsets_mapping=__a , return_token_type_ids=__a , return_length=__a , verbose=__a , return_tensors=__a , **__a , )
__A = qformer_text_encoding.pop("""input_ids""" )
__A = qformer_text_encoding.pop("""attention_mask""" )
if images is not None:
__A = self.image_processor(__a , return_tensors=__a )
encoding.update(__a )
return encoding
def lowerCAmelCase_ ( self : Optional[int] , *UpperCamelCase_ : Tuple , **UpperCamelCase_ : int ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__a , **__a )
def lowerCAmelCase_ ( self : Tuple , *UpperCamelCase_ : Tuple , **UpperCamelCase_ : Optional[Any] ):
"""simple docstring"""
return self.tokenizer.decode(*__a , **__a )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
__A = self.tokenizer.model_input_names
__A = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def lowerCAmelCase_ ( self : Tuple , UpperCamelCase_ : str , **UpperCamelCase_ : Optional[Any] ):
"""simple docstring"""
if os.path.isfile(__a ):
raise ValueError(F"Provided path ({save_directory}) should be a directory, not a file" )
os.makedirs(__a , exist_ok=__a )
__A = os.path.join(__a , """qformer_tokenizer""" )
self.qformer_tokenizer.save_pretrained(__a )
return super().save_pretrained(__a , **__a )
@classmethod
def lowerCAmelCase_ ( cls : List[Any] , UpperCamelCase_ : List[Any] , **UpperCamelCase_ : Any ):
"""simple docstring"""
__A = AutoTokenizer.from_pretrained(__a , subfolder="""qformer_tokenizer""" )
__A = cls._get_arguments_from_pretrained(__a , **__a )
args.append(__a )
return cls(*__a )
| 637 |
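The processor above fans a single text input out to two tokenizers and merges the results under prefixed keys; the merge pattern with plain dicts as stand-ins:

```python
def combine(text_encoding: dict, qformer_encoding: dict) -> dict:
    encoding = dict(text_encoding)
    encoding["qformer_input_ids"] = qformer_encoding.pop("input_ids")
    encoding["qformer_attention_mask"] = qformer_encoding.pop("attention_mask")
    return encoding


lm = {"input_ids": [[0, 5, 2]], "attention_mask": [[1, 1, 1]]}
qf = {"input_ids": [[101, 7, 102]], "attention_mask": [[1, 1, 1]]}
print(sorted(combine(lm, qf)))
# ['attention_mask', 'input_ids', 'qformer_attention_mask', 'qformer_input_ids']
```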
snake_case__ : List[Any] = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
snake_case__ : Tuple = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
snake_case__ : List[Any] = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
snake_case__ : List[str] = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
snake_case__ : Any = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
snake_case__ : int = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
snake_case__ : Any = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
snake_case__ : Union[str, Any] = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 278 | 0 |
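The integer lists above are precomputed denoising timestep schedules used as test fixtures. An evenly spaced schedule over 1000 training steps can be generated like this (the fixtures themselves follow other spacing rules, so this is only illustrative):

```python
import numpy as np

num_train_timesteps, num_inference_steps = 1000, 27
timesteps = (
    np.linspace(0, num_train_timesteps - 1, num_inference_steps)
    .round()[::-1]
    .astype(int)
)
print(timesteps[:5], timesteps[-1])  # [999 961 922 884 845] 0
```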
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_UpperCAmelCase : Tuple =logging.get_logger(__name__)
_UpperCAmelCase : int ={
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
_UpperCAmelCase : List[str] =[
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> Optional[Any]:
for attribute in key.split('''.''' ):
lowerCAmelCase_ : Union[str, Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
if weight_type is not None:
lowerCAmelCase_ : Any = getattr(lowerCAmelCase_ , lowerCAmelCase_ ).shape
else:
lowerCAmelCase_ : int = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
lowerCAmelCase_ : Optional[Any] = value
elif weight_type == "weight_g":
lowerCAmelCase_ : Any = value
elif weight_type == "weight_v":
lowerCAmelCase_ : List[str] = value
elif weight_type == "bias":
lowerCAmelCase_ : Tuple = value
else:
lowerCAmelCase_ : str = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ )-> Union[str, Any]:
lowerCAmelCase_ : List[str] = []
lowerCAmelCase_ : List[str] = fairseq_model.state_dict()
lowerCAmelCase_ : Dict = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
lowerCAmelCase_ : Any = None
for name, value in fairseq_dict.items():
lowerCAmelCase_ : int = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , hf_model.config.feat_extract_norm == '''group''' , )
lowerCAmelCase_ : List[str] = True
elif name.split('''.''' )[0] == "proj":
lowerCAmelCase_ : int = fairseq_model.proj
lowerCAmelCase_ : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
lowerCAmelCase_ : Any = True
if "*" in mapped_key:
lowerCAmelCase_ : str = name.split(lowerCAmelCase_ )[0].split('''.''' )[-2]
lowerCAmelCase_ : Optional[Any] = mapped_key.replace('''*''' , lowerCAmelCase_ )
if "weight_g" in name:
lowerCAmelCase_ : List[str] = '''weight_g'''
elif "weight_v" in name:
lowerCAmelCase_ : str = '''weight_v'''
elif "bias" in name:
lowerCAmelCase_ : Any = '''bias'''
elif "weight" in name:
lowerCAmelCase_ : List[Any] = '''weight'''
else:
lowerCAmelCase_ : Any = None
set_recursively(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
continue
if not is_used:
unused_weights.append(lowerCAmelCase_ )
logger.warning(f"""Unused weights: {unused_weights}""" )
return proj_weight
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> Dict:
lowerCAmelCase_ : List[str] = full_name.split('''conv_layers.''' )[-1]
lowerCAmelCase_ : Optional[int] = name.split('''.''' )
lowerCAmelCase_ : Any = int(items[0] )
lowerCAmelCase_ : List[str] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
lowerCAmelCase_ : int = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
lowerCAmelCase_ : int = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
lowerCAmelCase_ : Dict = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
lowerCAmelCase_ : Optional[int] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowerCAmelCase_ )
def make_linear_from_emb( emb )-> Any:
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict( dict_path )-> dict:
    with open(dict_path , '''r''' , encoding='''utf-8''' ) as f:
        lines = f.readlines()
    words = [line.split(''' ''' )[0] for line in lines]
    num_words = len(words )
    vocab_dict = {
        '''<s>''': 0,
        '''<pad>''': 1,
        '''</s>''': 2,
        '''<unk>''': 3,
    }
    vocab_dict.update(dict(zip(words , range(4 , num_words + 4 ) ) ) )
    return vocab_dict
@torch.no_grad()
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , )-> Tuple:
lowerCAmelCase_ : int = WavaVecaConfig.from_pretrained(lowerCAmelCase_ )
lowerCAmelCase_ : Optional[Any] = SpeechaTextaConfig.from_pretrained(
lowerCAmelCase_ , vocab_size=lowerCAmelCase_ , decoder_layers=lowerCAmelCase_ , do_stable_layer_norm=lowerCAmelCase_ )
lowerCAmelCase_ : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
lowerCAmelCase_ : Dict = model[0].eval()
# set weights for wav2vec2 encoder
lowerCAmelCase_ : Tuple = WavaVecaModel(lowerCAmelCase_ )
lowerCAmelCase_ : List[Any] = recursively_load_weights_wavaveca(model.encoder , lowerCAmelCase_ )
lowerCAmelCase_ : Optional[Any] = SpeechaTextaForCausalLM(lowerCAmelCase_ )
lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=lowerCAmelCase_ )
# set output linear layer
unexpected_keys.remove('''embed_out''' )
lowerCAmelCase_ : Any = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
lowerCAmelCase_ : int = SpeechEncoderDecoderModel(encoder=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
lowerCAmelCase_ : Optional[int] = False
# add projection layer
lowerCAmelCase_ : Any = nn.Parameter(projection_layer.weight )
lowerCAmelCase_ : Optional[Any] = nn.Parameter(projection_layer.bias )
lowerCAmelCase_ : Any = create_vocab_dict(lowerCAmelCase_ )
with open(os.path.join(lowerCAmelCase_ , '''vocab.json''' ) , '''w''' ) as fp:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
lowerCAmelCase_ : List[Any] = SpeechaTextaTokenizer(os.path.join(lowerCAmelCase_ , '''vocab.json''' ) )
tokenizer.save_pretrained(lowerCAmelCase_ )
lowerCAmelCase_ : int = hf_wavavec.config.to_dict()
lowerCAmelCase_ : List[Any] = tokenizer.pad_token_id
lowerCAmelCase_ : List[str] = tokenizer.bos_token_id
lowerCAmelCase_ : Optional[int] = tokenizer.eos_token_id
lowerCAmelCase_ : Optional[int] = '''speech_to_text_2'''
lowerCAmelCase_ : Tuple = '''wav2vec2'''
lowerCAmelCase_ : Optional[int] = SpeechEncoderDecoderConfig.from_dict(lowerCAmelCase_ )
hf_wavavec.save_pretrained(lowerCAmelCase_ )
feature_extractor.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
_UpperCAmelCase : List[Any] =argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-large-lv60""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/s2t-small-mustc-en-fr-st""",
type=str,
help="""Path to hf decoder s2t checkpoint config""",
)
parser.add_argument("""--vocab_size""", default=1_0224, type=int, help="""Vocab size of decoder""")
parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""")
_UpperCAmelCase : Tuple =parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
) | 619 |
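The conversion script above resolves dotted state-dict keys with chained `getattr` before copying weights; the core of that pattern in isolation:

```python
import torch
from torch import nn


def set_recursively(model: nn.Module, key: str, value: torch.Tensor) -> None:
    pointer = model
    for attribute in key.split("."):
        pointer = getattr(pointer, attribute)
    assert pointer.shape == value.shape, f"shape mismatch for {key}"
    pointer.data = value


layer = nn.Linear(4, 4)
set_recursively(layer, "weight", torch.zeros(4, 4))
print(layer.weight.sum().item())  # 0.0
```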
from __future__ import annotations
from math import pi
def ind_reactance( inductance : float , frequency : float , reactance : float )-> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if inductance < 0:
raise ValueError('''Inductance cannot be negative''' )
if frequency < 0:
raise ValueError('''Frequency cannot be negative''' )
if reactance < 0:
raise ValueError('''Inductive reactance cannot be negative''' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod() | 619 | 1 |
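A round-trip check of the relation the reactance helper above encodes, X_L = 2·π·f·L:

```python
from math import isclose, pi

frequency, inductance = 50.0, 0.1        # 50 Hz, 100 mH
reactance = 2 * pi * frequency * inductance
print(round(reactance, 4))               # 31.4159 ohm
assert isclose(reactance / (2 * pi * frequency), inductance)  # solve back for L
```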
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort( start , visited , sort ):
    """Depth-first topological sort from ``start``; each vertex is appended after its neighbors."""
    current = start
    # add current to visited
    visited.append(current )
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor , visited , sort )
    # if all neighbors visited add current to sort
    sort.append(current )
    # if all vertices haven't been visited select a new one to visit
    if len(visited ) != len(vertices ):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice , visited , sort )
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
| 242 |
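The DFS above appends each vertex only after all of its neighbors, so dependencies come out first. Kahn's queue-based variant produces the complementary order (sources first) and doubles as a cycle check:

```python
from collections import deque


def kahn(edges: dict) -> list:
    indegree = {v: 0 for v in edges}
    for targets in edges.values():
        for t in targets:
            indegree[t] += 1
    queue = deque(v for v, d in indegree.items() if d == 0)
    order = []
    while queue:
        v = queue.popleft()
        order.append(v)
        for t in edges[v]:
            indegree[t] -= 1
            if indegree[t] == 0:
                queue.append(t)
    return order  # shorter than len(edges) iff the graph has a cycle


print(kahn({"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}))
# ['a', 'c', 'b', 'd', 'e']
```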
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
class __magic_name__ ( lowerCAmelCase_ ):
def __init__( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Optional[int]:
'''simple docstring'''
super().__init__()
self.register_modules(
vae=__snake_case , text_encoder=__snake_case , tokenizer=__snake_case , unet=__snake_case , scheduler=__snake_case , safety_checker=__snake_case , feature_extractor=__snake_case , )
def __magic_name__ ( self , __snake_case = "auto" ) -> int:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__a =self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__snake_case )
def __magic_name__ ( self ) -> Optional[int]:
'''simple docstring'''
self.enable_attention_slicing(__snake_case )
@torch.no_grad()
def __call__( self , __snake_case , __snake_case = 512 , __snake_case = 512 , __snake_case = 50 , __snake_case = 7.5 , __snake_case = None , __snake_case = 1 , __snake_case = 0.0 , __snake_case = None , __snake_case = None , __snake_case = "pil" , __snake_case = True , __snake_case = None , __snake_case = 1 , __snake_case = None , **__snake_case , ) -> Tuple:
'''simple docstring'''
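        # Seed-resize generation: a fixed 64x64 reference latent is copied into the
        # matching region of the target-size latent, so the same seed produces
        # similar images across different output resolutions.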
if isinstance(__snake_case , __snake_case ):
__a =1
elif isinstance(__snake_case , __snake_case ):
__a =len(__snake_case )
else:
raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(__snake_case )}' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__snake_case , __snake_case ) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(__snake_case )}.' )
# get prompt text embeddings
__a =self.tokenizer(
__snake_case , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
__a =text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__a =self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f' {self.tokenizer.model_max_length} tokens: {removed_text}' )
__a =text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
__a =self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__a , __a , __a =text_embeddings.shape
__a =text_embeddings.repeat(1 , __snake_case , 1 )
__a =text_embeddings.view(bs_embed * num_images_per_prompt , __snake_case , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__a =guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__a =42
if negative_prompt is None:
__a =['']
elif type(__snake_case ) is not type(__snake_case ):
raise TypeError(
f'`negative_prompt` should be the same type to `prompt`, but got {type(__snake_case )} !='
f' {type(__snake_case )}.' )
elif isinstance(__snake_case , __snake_case ):
__a =[negative_prompt]
elif batch_size != len(__snake_case ):
raise ValueError(
f'`negative_prompt`: {negative_prompt} has batch size {len(__snake_case )}, but `prompt`:'
f' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
' the batch size of `prompt`.' )
else:
__a =negative_prompt
__a =text_input_ids.shape[-1]
__a =self.tokenizer(
__snake_case , padding='max_length' , max_length=__snake_case , truncation=__snake_case , return_tensors='pt' , )
__a =self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__a =uncond_embeddings.shape[1]
__a =uncond_embeddings.repeat(__snake_case , __snake_case , 1 )
__a =uncond_embeddings.view(batch_size * num_images_per_prompt , __snake_case , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__a =torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__a =(batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__a =(batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
__a =text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__a =torch.randn(
__snake_case , generator=__snake_case , device='cpu' , dtype=__snake_case ).to(self.device )
__a =torch.randn(__snake_case , generator=__snake_case , device='cpu' , dtype=__snake_case ).to(
self.device )
else:
__a =torch.randn(
__snake_case , generator=__snake_case , device=self.device , dtype=__snake_case )
__a =torch.randn(__snake_case , generator=__snake_case , device=self.device , dtype=__snake_case )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
__a =latents_reference.to(self.device )
__a =latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
__a =(latents_shape[3] - latents_shape_reference[3]) // 2
__a =(latents_shape[2] - latents_shape_reference[2]) // 2
__a =latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
__a =latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
__a =0 if dx < 0 else dx
__a =0 if dy < 0 else dy
__a =max(-dx , 0 )
__a =max(-dy , 0 )
__a =latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(__snake_case )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__a =self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__a =latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a ='eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__a ={}
if accepts_eta:
__a =eta
for i, t in enumerate(self.progress_bar(__snake_case ) ):
# expand the latents if we are doing classifier free guidance
__a =torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__a =self.scheduler.scale_model_input(__snake_case , __snake_case )
# predict the noise residual
__a =self.unet(__snake_case , __snake_case , encoder_hidden_states=__snake_case ).sample
# perform guidance
if do_classifier_free_guidance:
__a , __a =noise_pred.chunk(2 )
__a =noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__a =self.scheduler.step(__snake_case , __snake_case , __snake_case , **__snake_case ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__snake_case , __snake_case , __snake_case )
__a =1 / 0.1_8215 * latents
__a =self.vae.decode(__snake_case ).sample
__a =(image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__a =image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
__a =self.feature_extractor(self.numpy_to_pil(__snake_case ) , return_tensors='pt' ).to(
self.device )
__a , __a =self.safety_checker(
images=__snake_case , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
__a =None
if output_type == "pil":
__a =self.numpy_to_pil(__snake_case )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=__snake_case , nsfw_content_detected=__snake_case )
| 242 | 1 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
UpperCamelCase__ : Optional[Any] = re.compile(R"\s+")
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> int:
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(SCREAMING_SNAKE_CASE_ , """""" , example["""content"""] ).encode("""utf-8""" ) ).hexdigest()}
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = [len(SCREAMING_SNAKE_CASE_ ) for line in example["""content"""].splitlines()]
return {"line_mean": np.mean(SCREAMING_SNAKE_CASE_ ), "line_max": max(SCREAMING_SNAKE_CASE_ )}
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = np.mean([c.isalnum() for c in example["""content"""]] )
return {"alpha_frac": alpha_frac}
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["""hash"""] )
return True
else:
return False
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=5 ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""auto-generated""", """autogenerated""", """automatically generated"""]
_SCREAMING_SNAKE_CASE = example["""content"""].splitlines()
for _, line in zip(range(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=0.05 ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""unit tests""", """test file""", """configuration file"""]
_SCREAMING_SNAKE_CASE = example["""content"""].splitlines()
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = 0
# first test
for _, line in zip(range(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_SCREAMING_SNAKE_CASE = example["""content"""].count("""\n""" )
_SCREAMING_SNAKE_CASE = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("""config""" )
count_test += line.lower().count("""test""" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""def """, """class """, """for """, """while """]
_SCREAMING_SNAKE_CASE = example["""content"""].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=4 ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = example["""content"""].splitlines()
_SCREAMING_SNAKE_CASE = 0
for line in lines:
counter += line.lower().count("""=""" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = tokenizer(example["""content"""] , truncation=SCREAMING_SNAKE_CASE_ )["""input_ids"""]
_SCREAMING_SNAKE_CASE = len(example["""content"""] ) / len(SCREAMING_SNAKE_CASE_ )
return {"ratio": ratio}
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = {}
results.update(get_hash(SCREAMING_SNAKE_CASE_ ) )
results.update(line_stats(SCREAMING_SNAKE_CASE_ ) )
results.update(alpha_stats(SCREAMING_SNAKE_CASE_ ) )
results.update(char_token_ratio(SCREAMING_SNAKE_CASE_ ) )
results.update(is_autogenerated(SCREAMING_SNAKE_CASE_ ) )
results.update(is_config_or_test(SCREAMING_SNAKE_CASE_ ) )
results.update(has_no_keywords(SCREAMING_SNAKE_CASE_ ) )
results.update(has_few_assignments(SCREAMING_SNAKE_CASE_ ) )
return results
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
"""simple docstring"""
if not check_uniques(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> int:
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE_ , """rb""" ) as f_in:
with gzip.open(str(SCREAMING_SNAKE_CASE_ ) + """.gz""" , """wb""" , compresslevel=6 ) as f_out:
shutil.copyfileobj(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
os.unlink(SCREAMING_SNAKE_CASE_ )
# Settings
UpperCamelCase__ : List[str] = HfArgumentParser(PreprocessingArguments)
UpperCamelCase__ : List[Any] = parser.parse_args()
if args.num_workers is None:
UpperCamelCase__ : Optional[int] = multiprocessing.cpu_count()
UpperCamelCase__ : List[Any] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
UpperCamelCase__ : Union[str, Any] = time.time()
UpperCamelCase__ : Optional[Any] = load_dataset(args.dataset_name, split="train")
print(f"""Time to load dataset: {time.time()-t_start:.2f}""")
# Run preprocessing
UpperCamelCase__ : Any = time.time()
UpperCamelCase__ : Any = ds.map(preprocess, num_proc=args.num_workers)
print(f"""Time to preprocess dataset: {time.time()-t_start:.2f}""")
# Deduplicate hashes
UpperCamelCase__ : int = set(ds.unique("hash"))
UpperCamelCase__ : Optional[int] = len(uniques) / len(ds)
print(f"""Fraction of duplicates: {1-frac:.2%}""")
# Deduplicate data and apply heuristics
UpperCamelCase__ : Optional[Any] = time.time()
UpperCamelCase__ : Tuple = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"""Time to filter dataset: {time.time()-t_start:.2f}""")
print(f"""Size of filtered dataset: {len(ds_filter)}""")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
UpperCamelCase__ : Optional[int] = time.time()
UpperCamelCase__ , UpperCamelCase__ : List[Any] = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f"""Time to deduplicate dataset: {time.time()-t_start:.2f}""")
print(f"""Size of deduplicate dataset: {len(ds_filter)}""")
# Save data in batches of samples_per_file
UpperCamelCase__ : str = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
with open(output_dir / "duplicate_clusters.json", "w") as f:
json.dump(duplicate_clusters, f)
UpperCamelCase__ : Tuple = output_dir / "data"
data_dir.mkdir(exist_ok=True)
UpperCamelCase__ : Optional[Any] = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
UpperCamelCase__ : List[str] = str(data_dir / f"""file-{file_number+1:012}.json""")
UpperCamelCase__ : Optional[int] = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f"""Time to save dataset: {time.time()-t_start:.2f}""")
| 0 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class _a (_lowerCamelCase):
"""simple docstring"""
def __init__( self , A__ , A__ ) -> Any:
_SCREAMING_SNAKE_CASE = params
_SCREAMING_SNAKE_CASE = np.array(A__ )
_SCREAMING_SNAKE_CASE = np.array([len(A__ ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , A__ ) -> Dict:
return (self.token_ids[index], self.lengths[index])
def __len__( self ) -> Tuple:
return len(self.lengths )
def UpperCamelCase ( self ) -> Dict:
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def UpperCamelCase ( self ) -> List[str]:
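        # Split sequences longer than the model's max input size into chunks,
        # re-attaching the start/end special tokens to every chunk.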
_SCREAMING_SNAKE_CASE = self.params.max_model_input_size
_SCREAMING_SNAKE_CASE = self.lengths > max_len
logger.info(F"Splitting {sum(A__ )} too long sequences." )
def divide_chunks(A__ , A__ ):
return [l[i : i + n] for i in range(0 , len(A__ ) , A__ )]
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
if self.params.mlm:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
_SCREAMING_SNAKE_CASE = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
_SCREAMING_SNAKE_CASE = np.insert(A__ , 0 , A__ )
if sub_s[-1] != sep_id:
_SCREAMING_SNAKE_CASE = np.insert(A__ , len(A__ ) , A__ )
assert len(A__ ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(A__ )
new_tok_ids.extend(A__ )
new_lengths.extend([len(A__ ) for l in sub_seqs] )
_SCREAMING_SNAKE_CASE = np.array(A__ )
_SCREAMING_SNAKE_CASE = np.array(A__ )
def UpperCamelCase ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = len(self )
_SCREAMING_SNAKE_CASE = self.lengths > 11
_SCREAMING_SNAKE_CASE = self.token_ids[indices]
_SCREAMING_SNAKE_CASE = self.lengths[indices]
_SCREAMING_SNAKE_CASE = len(self )
logger.info(F"Remove {init_size - new_size} too short (<=11 tokens) sequences." )
def UpperCamelCase ( self ) -> int:
if "unk_token" not in self.params.special_tok_ids:
return
else:
_SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""unk_token"""]
_SCREAMING_SNAKE_CASE = len(self )
_SCREAMING_SNAKE_CASE = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
_SCREAMING_SNAKE_CASE = (unk_occs / self.lengths) < 0.5
_SCREAMING_SNAKE_CASE = self.token_ids[indices]
_SCREAMING_SNAKE_CASE = self.lengths[indices]
_SCREAMING_SNAKE_CASE = len(self )
logger.info(F"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%)." )
def UpperCamelCase ( self ) -> Optional[Any]:
if not self.params.is_master:
return
logger.info(F"{len(self )} sequences" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def UpperCamelCase ( self , A__ ) -> Any:
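        # Collate function: pad each sequence in the batch to the batch maximum
        # and return (token_ids, lengths) tensors.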
_SCREAMING_SNAKE_CASE = [t[0] for t in batch]
_SCREAMING_SNAKE_CASE = [t[1] for t in batch]
assert len(A__ ) == len(A__ )
# Max for paddings
_SCREAMING_SNAKE_CASE = max(A__ )
# Pad token ids
if self.params.mlm:
_SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""pad_token"""]
else:
_SCREAMING_SNAKE_CASE = self.params.special_tok_ids["""unk_token"""]
_SCREAMING_SNAKE_CASE = [list(t.astype(A__ ) ) + [pad_idx] * (max_seq_len_ - len(A__ )) for t in token_ids]
assert len(tk_ ) == len(A__ )
assert all(len(A__ ) == max_seq_len_ for t in tk_ )
_SCREAMING_SNAKE_CASE = torch.tensor(tk_ ) # (bs, max_seq_len_)
_SCREAMING_SNAKE_CASE = torch.tensor(A__ ) # (bs)
return tk_t, lg_t
| 0 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
snake_case : Tuple = {
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : int = [
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
snake_case : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 545 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase__ ( a_):
"""simple docstring"""
def __init__( self : List[str] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[int]=1_3 , UpperCamelCase_ : Dict=7 , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : Any=False , UpperCamelCase_ : Tuple=2 , UpperCamelCase_ : List[Any]=9_9 , UpperCamelCase_ : List[Any]=0 , UpperCamelCase_ : Tuple=3_2 , UpperCamelCase_ : Any=5 , UpperCamelCase_ : Dict=4 , UpperCamelCase_ : Tuple=0.1 , UpperCamelCase_ : Any=0.1 , UpperCamelCase_ : Optional[Any]=5_1_2 , UpperCamelCase_ : Union[str, Any]=1_2 , UpperCamelCase_ : List[str]=2 , UpperCamelCase_ : str=0.02 , UpperCamelCase_ : List[str]=3 , UpperCamelCase_ : Dict=4 , UpperCamelCase_ : Optional[int]="last" , UpperCamelCase_ : List[str]=None , UpperCamelCase_ : Tuple=None , ):
'''simple docstring'''
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = seq_length
__magic_name__ = is_training
__magic_name__ = use_input_lengths
__magic_name__ = use_token_type_ids
__magic_name__ = use_labels
__magic_name__ = gelu_activation
__magic_name__ = sinusoidal_embeddings
__magic_name__ = causal
__magic_name__ = asm
__magic_name__ = n_langs
__magic_name__ = vocab_size
__magic_name__ = n_special
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = max_position_embeddings
__magic_name__ = type_vocab_size
__magic_name__ = type_sequence_label_size
__magic_name__ = initializer_range
__magic_name__ = num_labels
__magic_name__ = num_choices
__magic_name__ = summary_type
__magic_name__ = use_proj
__magic_name__ = scope
def a__ ( self : List[str] ):
'''simple docstring'''
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ = None
if self.use_input_lengths:
__magic_name__ = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__magic_name__ = None
if self.use_token_type_ids:
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None
if self.use_labels:
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ = ids_tensor([self.batch_size] , 2 ).float()
__magic_name__ = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def a__ ( self : int ):
'''simple docstring'''
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def a__ ( self : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : str , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[int] , ):
'''simple docstring'''
__magic_name__ = FlaubertModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__magic_name__ = model(UpperCamelCase_ , lengths=UpperCamelCase_ , langs=UpperCamelCase_ )
__magic_name__ = model(UpperCamelCase_ , langs=UpperCamelCase_ )
__magic_name__ = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : str , ):
'''simple docstring'''
__magic_name__ = FlaubertWithLMHeadModel(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__magic_name__ = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self : Union[str, Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : str , ):
'''simple docstring'''
__magic_name__ = FlaubertForQuestionAnsweringSimple(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__magic_name__ = model(UpperCamelCase_ )
__magic_name__ = model(UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self : str , UpperCamelCase_ : int , UpperCamelCase_ : Any , UpperCamelCase_ : Any , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any] , ):
'''simple docstring'''
__magic_name__ = FlaubertForQuestionAnswering(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__magic_name__ = model(UpperCamelCase_ )
__magic_name__ = model(
UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ , cls_index=UpperCamelCase_ , is_impossible=UpperCamelCase_ , p_mask=UpperCamelCase_ , )
__magic_name__ = model(
UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ , cls_index=UpperCamelCase_ , is_impossible=UpperCamelCase_ , )
((__magic_name__) , ) = result_with_labels.to_tuple()
__magic_name__ = model(UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ )
((__magic_name__) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def a__ ( self : str , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str] , ):
'''simple docstring'''
__magic_name__ = FlaubertForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__magic_name__ = model(UpperCamelCase_ )
__magic_name__ = model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self : Optional[int] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Tuple , ):
'''simple docstring'''
__magic_name__ = self.num_labels
__magic_name__ = FlaubertForTokenClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__magic_name__ = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self : str , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple , ):
'''simple docstring'''
__magic_name__ = self.num_choices
__magic_name__ = FlaubertForMultipleChoice(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__magic_name__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__magic_name__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__magic_name__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__magic_name__ = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ ( self : Union[str, Any] ):
'''simple docstring'''
__magic_name__ = self.prepare_config_and_inputs()
(
(
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) ,
) = config_and_inputs
__magic_name__ = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'lengths': input_lengths,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( a_ , a_ , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
__UpperCAmelCase = (
{
"""feature-extraction""": FlaubertModel,
"""fill-mask""": FlaubertWithLMHeadModel,
"""question-answering""": FlaubertForQuestionAnsweringSimple,
"""text-classification""": FlaubertForSequenceClassification,
"""token-classification""": FlaubertForTokenClassification,
"""zero-shot""": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def a__ ( self : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def a__ ( self : List[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any]=False ):
'''simple docstring'''
__magic_name__ = super()._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
__magic_name__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase_ )
__magic_name__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase_ )
return inputs_dict
def a__ ( self : List[str] ):
'''simple docstring'''
__magic_name__ = FlaubertModelTester(self )
__magic_name__ = ConfigTester(self , config_class=UpperCamelCase_ , emb_dim=3_7 )
def a__ ( self : Optional[int] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ ( self : Optional[Any] ):
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*UpperCamelCase_ )
def a__ ( self : Dict ):
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*UpperCamelCase_ )
def a__ ( self : int ):
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*UpperCamelCase_ )
def a__ ( self : int ):
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*UpperCamelCase_ )
def a__ ( self : Optional[int] ):
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*UpperCamelCase_ )
def a__ ( self : str ):
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*UpperCamelCase_ )
def a__ ( self : Union[str, Any] ):
'''simple docstring'''
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*UpperCamelCase_ )
@slow
def a__ ( self : Any ):
'''simple docstring'''
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ = FlaubertModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@slow
@require_torch_gpu
def a__ ( self : str ):
'''simple docstring'''
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
__magic_name__ = True
__magic_name__ = model_class(config=UpperCamelCase_ )
__magic_name__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
__magic_name__ = torch.jit.trace(
UpperCamelCase_ , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(UpperCamelCase_ , os.path.join(UpperCamelCase_ , 'traced_model.pt' ) )
__magic_name__ = torch.jit.load(os.path.join(UpperCamelCase_ , 'traced_model.pt' ) , map_location=UpperCamelCase_ )
loaded(inputs_dict['input_ids'].to(UpperCamelCase_ ) , inputs_dict['attention_mask'].to(UpperCamelCase_ ) )
@require_torch
class UpperCamelCase__ ( unittest.TestCase):
"""simple docstring"""
@slow
def a__ ( self : Dict ):
'''simple docstring'''
__magic_name__ = FlaubertModel.from_pretrained('flaubert/flaubert_base_cased' )
__magic_name__ = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
with torch.no_grad():
__magic_name__ = model(UpperCamelCase_ )[0]
__magic_name__ = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , UpperCamelCase_ )
__magic_name__ = torch.tensor(
[[[-2.6_251, -1.4_298, -0.0_227], [-2.8_510, -1.6_387, 0.2_258], [-2.8_114, -1.1_832, -0.3_066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1e-4 ) ) | 545 | 1 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase__ =logging.get_logger(__name__)
UpperCAmelCase__ ={"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase__ ={
"tokenizer_file": {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
},
}
UpperCAmelCase__ ={
"gpt-neox-20b": 2048,
}
class lowerCamelCase__ ( __lowercase ):
a : List[str] = VOCAB_FILES_NAMES
a : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
a : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : Tuple = ['input_ids', 'attention_mask']
def __init__( self : List[Any] , A_ : Tuple=None , A_ : Optional[Any]=None , A_ : str=None , A_ : Optional[int]="<|endoftext|>" , A_ : Dict="<|endoftext|>" , A_ : List[Any]="<|endoftext|>" , A_ : Any=False , **A_ : List[str] , ):
'''simple docstring'''
super().__init__(
__A , __A , tokenizer_file=__A , unk_token=__A , bos_token=__A , eos_token=__A , add_prefix_space=__A , **__A , )
__lowercase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , __A ) != add_prefix_space:
__lowercase = getattr(__A , pre_tok_state.pop("""type""" ) )
__lowercase = add_prefix_space
__lowercase = pre_tok_class(**__A )
__lowercase = add_prefix_space
def SCREAMING_SNAKE_CASE_ ( self : Tuple , A_ : Union[str, Any] , A_ : int = None ):
'''simple docstring'''
__lowercase = self._tokenizer.model.save(__A , name=__A )
return tuple(__A )
def SCREAMING_SNAKE_CASE_ ( self : str , A_ : Any ):
'''simple docstring'''
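        # Concatenate every turn of the conversation followed by the EOS token,
        # keeping only the last `model_max_length` ids.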
__lowercase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__A , add_special_tokens=__A ) + [self.eos_token_id] )
if len(__A ) > self.model_max_length:
__lowercase = input_ids[-self.model_max_length :]
return input_ids
| 714 |
"""simple docstring"""
def lowerCAmelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : str ):
"""simple docstring"""
def get_matched_characters(UpperCamelCase__ : str , UpperCamelCase__ : str ) -> str:
__lowercase = []
__lowercase = min(len(_stra ) , len(_stra ) ) // 2
for i, l in enumerate(_stra ):
__lowercase = int(max(0 , i - limit ) )
__lowercase = int(min(i + limit + 1 , len(_stra ) ) )
if l in _stra[left:right]:
matched.append(UpperCamelCase__ )
__lowercase = f'''{_stra[0:_stra.index(UpperCamelCase__ )]} {_stra[_stra.index(UpperCamelCase__ ) + 1:]}'''
return "".join(UpperCamelCase__ )
# matching characters
__lowercase = get_matched_characters(UpperCamelCase__ , UpperCamelCase__ )
__lowercase = get_matched_characters(UpperCamelCase__ , UpperCamelCase__ )
__lowercase = len(UpperCamelCase__ )
# transposition
__lowercase = (
len([(ca, ca) for ca, ca in zip(UpperCamelCase__ , UpperCamelCase__ ) if ca != ca] ) // 2
)
if not match_count:
__lowercase = 0.0
else:
__lowercase = (
1
/ 3
* (
match_count / len(UpperCamelCase__ )
+ match_count / len(UpperCamelCase__ )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
__lowercase = 0
for ca, ca in zip(stra[:4] , stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("hello", "world"))
| 442 | 0 |
def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
a_ : int = len(SCREAMING_SNAKE_CASE_ )
a_ : int = len(SCREAMING_SNAKE_CASE_ )
a_ : int = (
first_str_length if first_str_length > second_str_length else second_str_length
)
a_ : list = []
for char_count in range(SCREAMING_SNAKE_CASE_ ):
if char_count < first_str_length:
output_list.append(first_str[char_count] )
if char_count < second_str_length:
output_list.append(second_str[char_count] )
return "".join(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
print(alternative_string_arrange("AB", "XYZ"), end=" ")
| 419 |
from __future__ import annotations
def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
a_ : list[list[int]] = []
create_all_state(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , [] , SCREAMING_SNAKE_CASE_ )
return result
def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : list[int] , SCREAMING_SNAKE_CASE_ : list[list[int]] , ):
"""simple docstring"""
if level == 0:
total_list.append(current_list[:] )
return
for i in range(SCREAMING_SNAKE_CASE_ , total_number - level + 2 ):
current_list.append(SCREAMING_SNAKE_CASE_ )
create_all_state(i + 1 , SCREAMING_SNAKE_CASE_ , level - 1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
current_list.pop()
def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : list[list[int]] ):
"""simple docstring"""
for i in total_list:
print(*SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : str = 4
SCREAMING_SNAKE_CASE : Optional[Any] = 2
SCREAMING_SNAKE_CASE : Optional[int] = generate_all_combinations(n, k)
print_all_state(total_list)
| 419 | 1 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : Dict ):
'''simple docstring'''
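    # Map a parameter name from the original EfficientFormer checkpoint to the
    # Hugging Face naming scheme (patch_embed -> convolution/batchnorm layers,
    # network.N -> intermediate/last stage blocks, head -> classifier).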
SCREAMING_SNAKE_CASE__ : Optional[int] = old_name
if "patch_embed" in old_name:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = old_name.split("." )
if layer == "0":
SCREAMING_SNAKE_CASE__ : Tuple = old_name.replace("0" , "convolution1" )
elif layer == "1":
SCREAMING_SNAKE_CASE__ : int = old_name.replace("1" , "batchnorm_before" )
elif layer == "3":
SCREAMING_SNAKE_CASE__ : List[str] = old_name.replace("3" , "convolution2" )
else:
SCREAMING_SNAKE_CASE__ : List[str] = old_name.replace("4" , "batchnorm_after" )
if "network" in old_name and re.search(r"\d\.\d" , _lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = r"\b\d{2}\b"
if bool(re.search(_lowerCamelCase , _lowerCamelCase ) ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = re.search(r"\d\.\d\d." , _lowerCamelCase ).group()
else:
SCREAMING_SNAKE_CASE__ : str = re.search(r"\d\.\d." , _lowerCamelCase ).group()
if int(match[0] ) < 6:
SCREAMING_SNAKE_CASE__ : Tuple = old_name.replace(_lowerCamelCase , "" )
SCREAMING_SNAKE_CASE__ : List[Any] = trimmed_name.replace("network" , match[0] + ".meta4D_layers.blocks." + match[2:-1] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = "intermediate_stages." + trimmed_name
else:
SCREAMING_SNAKE_CASE__ : str = old_name.replace(_lowerCamelCase , "" )
if int(match[2] ) < num_meta4D_last_stage:
SCREAMING_SNAKE_CASE__ : str = trimmed_name.replace("network" , "meta4D_layers.blocks." + match[2] )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = str(int(match[2] ) - num_meta4D_last_stage )
SCREAMING_SNAKE_CASE__ : List[str] = trimmed_name.replace("network" , "meta3D_layers.blocks." + layer_index )
if "norm1" in old_name:
SCREAMING_SNAKE_CASE__ : Optional[int] = trimmed_name.replace("norm1" , "layernorm1" )
elif "norm2" in old_name:
SCREAMING_SNAKE_CASE__ : List[Any] = trimmed_name.replace("norm2" , "layernorm2" )
elif "fc1" in old_name:
SCREAMING_SNAKE_CASE__ : Dict = trimmed_name.replace("fc1" , "linear_in" )
elif "fc2" in old_name:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = trimmed_name.replace("fc2" , "linear_out" )
SCREAMING_SNAKE_CASE__ : Any = "last_stage." + trimmed_name
elif "network" in old_name and re.search(r".\d." , _lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = old_name.replace("network" , "intermediate_stages" )
if "fc" in new_name:
SCREAMING_SNAKE_CASE__ : Dict = new_name.replace("fc" , "convolution" )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = new_name.replace("norm1" , "batchnorm_before" )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
SCREAMING_SNAKE_CASE__ : Optional[int] = new_name.replace("norm2" , "batchnorm_after" )
if "proj" in new_name:
SCREAMING_SNAKE_CASE__ : Dict = new_name.replace("proj" , "projection" )
if "dist_head" in new_name:
SCREAMING_SNAKE_CASE__ : Optional[Any] = new_name.replace("dist_head" , "distillation_classifier" )
elif "head" in new_name:
SCREAMING_SNAKE_CASE__ : int = new_name.replace("head" , "classifier" )
elif "patch_embed" in new_name:
SCREAMING_SNAKE_CASE__ : List[Any] = "efficientformer." + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
SCREAMING_SNAKE_CASE__ : Optional[Any] = new_name.replace("norm" , "layernorm" )
SCREAMING_SNAKE_CASE__ : Dict = "efficientformer." + new_name
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "efficientformer.encoder." + new_name
return new_name
def UpperCAmelCase ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[int] ):
'''simple docstring'''
for key in checkpoint.copy().keys():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = checkpoint.pop(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = val
return checkpoint
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return image
def UpperCAmelCase ( _lowerCamelCase : Path , _lowerCamelCase : Path , _lowerCamelCase : Path , _lowerCamelCase : bool ):
'''simple docstring'''
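    # Load and rename the original checkpoint, sanity-check the logits on a COCO
    # test image against reference values (l1/l3/l7), then save and optionally
    # push the model and processor.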
SCREAMING_SNAKE_CASE__ : Tuple = torch.load(_lowerCamelCase , map_location="cpu" )["model"]
SCREAMING_SNAKE_CASE__ : Tuple = EfficientFormerConfig.from_json_file(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Tuple = EfficientFormerForImageClassificationWithTeacher(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = "_".join(checkpoint_path.split("/" )[-1].split("." )[0].split("_" )[:-1] )
SCREAMING_SNAKE_CASE__ : Dict = config.depths[-1] - config.num_metaad_blocks + 1
SCREAMING_SNAKE_CASE__ : Optional[Any] = convert_torch_checkpoint(_lowerCamelCase , _lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE__ : str = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
# prepare image
SCREAMING_SNAKE_CASE__ : int = prepare_img()
SCREAMING_SNAKE_CASE__ : List[Any] = 256
SCREAMING_SNAKE_CASE__ : int = 224
SCREAMING_SNAKE_CASE__ : Dict = EfficientFormerImageProcessor(
size={"shortest_edge": image_size} , crop_size={"height": crop_size, "width": crop_size} , resample=pillow_resamplings["bicubic"] , )
SCREAMING_SNAKE_CASE__ : List[str] = processor(images=_lowerCamelCase , return_tensors="pt" ).pixel_values
# original processing pipeline
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Compose(
[
Resize(_lowerCamelCase , interpolation=pillow_resamplings["bicubic"] ),
CenterCrop(_lowerCamelCase ),
ToTensor(),
Normalize(_lowerCamelCase , _lowerCamelCase ),
] )
SCREAMING_SNAKE_CASE__ : str = image_transforms(_lowerCamelCase ).unsqueeze(0 )
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : str = model(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : int = outputs.logits
SCREAMING_SNAKE_CASE__ : Tuple = (1, 1_000)
if "l1" in model_name:
SCREAMING_SNAKE_CASE__ : Dict = torch.Tensor(
[-0.1_3_1_2, 0.4_3_5_3, -1.0_4_9_9, -0.5_1_2_4, 0.4_1_8_3, -0.6_7_9_3, -1.3_7_7_7, -0.0_8_9_3, -0.7_3_5_8, -2.4_3_2_8] )
assert torch.allclose(logits[0, :10] , _lowerCamelCase , atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
SCREAMING_SNAKE_CASE__ : int = torch.Tensor(
[-1.3_1_5_0, -1.5_4_5_6, -1.2_5_5_6, -0.8_4_9_6, -0.7_1_2_7, -0.7_8_9_7, -0.9_7_2_8, -0.3_0_5_2, 0.3_7_5_1, -0.3_1_2_7] )
assert torch.allclose(logits[0, :10] , _lowerCamelCase , atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.Tensor(
[-1.0_2_8_3, -1.4_1_3_1, -0.5_6_4_4, -1.3_1_1_5, -0.5_7_8_5, -1.2_0_4_9, -0.7_5_2_8, 0.1_9_9_2, -0.3_8_2_2, -0.0_8_7_8] )
assert logits.shape == expected_shape
else:
raise ValueError(
f"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" )
# Save Checkpoints
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
processor.save_pretrained(_lowerCamelCase )
print(f"""Processor successfuly saved at {pytorch_dump_path}""" )
if push_to_hub:
print("Pushing model to the hub..." )
model.push_to_hub(
repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add model" , use_temp_dir=_lowerCamelCase , )
processor.push_to_hub(
repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add image processor" , use_temp_dir=_lowerCamelCase , )
if __name__ == "__main__":
__lowercase :int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path",
default=None,
type=str,
required=True,
help="Path to EfficientFormer pytorch checkpoint.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for EfficientFormer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
parser.set_defaults(push_to_hub=True)
__lowercase :List[Any] = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
) | 702 |
import sys
from collections import defaultdict
class _a :
"""simple docstring"""
def __init__( self : Any ) ->Dict:
SCREAMING_SNAKE_CASE__ : Tuple = []
def A_ ( self : int , a : List[str] ) ->Dict:
return self.node_position[vertex]
def A_ ( self : Optional[Any] , a : Any , a : List[str] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : str = pos
def A_ ( self : List[Any] , a : List[str] , a : Dict , a : Dict , a : List[Any] ) ->Optional[int]:
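        # Sift-down: move the value at `start` toward the leaves until the
        # min-heap property holds, keeping the vertex-position map in sync.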
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
SCREAMING_SNAKE_CASE__ : Dict = 2 * start + 1
else:
SCREAMING_SNAKE_CASE__ : Tuple = 2 * start + 2
if heap[smallest_child] < heap[start]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = heap[smallest_child], positions[smallest_child]
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = (
heap[start],
positions[start],
)
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = temp, tempa
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , a )
self.top_to_bottom(a , a , a , a )
def A_ ( self : Union[str, Any] , a : Tuple , a : Tuple , a : Union[str, Any] , a : List[Any] ) ->Optional[int]:
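        # Sift-up: move the value at `index` toward the root while it is smaller
        # than its parent, keeping the vertex-position map in sync.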
SCREAMING_SNAKE_CASE__ : List[Any] = position[index]
while index != 0:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
SCREAMING_SNAKE_CASE__ : List[Any] = heap[parent]
SCREAMING_SNAKE_CASE__ : str = position[parent]
self.set_position(position[parent] , a )
else:
SCREAMING_SNAKE_CASE__ : int = val
SCREAMING_SNAKE_CASE__ : Optional[Any] = temp
self.set_position(a , a )
break
SCREAMING_SNAKE_CASE__ : Optional[int] = parent
else:
SCREAMING_SNAKE_CASE__ : int = val
SCREAMING_SNAKE_CASE__ : List[str] = temp
self.set_position(a , 0 )
def A_ ( self : Union[str, Any] , a : int , a : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[str] = len(a ) // 2 - 1
for i in range(a , -1 , -1 ):
self.top_to_bottom(a , a , len(a ) , a )
def A_ ( self : Dict , a : List[Any] , a : Dict ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Any = positions[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = sys.maxsize
self.top_to_bottom(a , 0 , len(a ) , a )
return temp
def UpperCAmelCase ( _lowerCamelCase : str ):
'''simple docstring'''
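    # Prim's minimum spanning tree: start from vertex 0, repeatedly extract the
    # closest unvisited vertex from the heap and relax its neighbors; returns the
    # list of tree edges.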
SCREAMING_SNAKE_CASE__ : Optional[int] = Heap()
SCREAMING_SNAKE_CASE__ : Any = [0] * len(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Any = [-1] * len(_lowerCamelCase ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [] # Heap of Distance of vertices from their neighboring vertex
SCREAMING_SNAKE_CASE__ : str = []
for vertex in range(len(_lowerCamelCase ) ):
distance_tv.append(sys.maxsize )
positions.append(_lowerCamelCase )
heap.node_position.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = []
SCREAMING_SNAKE_CASE__ : int = 1
SCREAMING_SNAKE_CASE__ : int = sys.maxsize
for neighbor, distance in adjacency_list[0]:
SCREAMING_SNAKE_CASE__ : int = 0
SCREAMING_SNAKE_CASE__ : List[str] = distance
heap.heapify(_lowerCamelCase , _lowerCamelCase )
for _ in range(1 , len(_lowerCamelCase ) ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = heap.delete_minimum(_lowerCamelCase , _lowerCamelCase )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(_lowerCamelCase )]
):
SCREAMING_SNAKE_CASE__ : Any = distance
heap.bottom_to_top(
_lowerCamelCase , heap.get_position(_lowerCamelCase ) , _lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : str = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
__lowercase :Union[str, Any] = int(input("Enter number of edges: ").strip())
__lowercase :Dict = defaultdict(list)
for _ in range(edges_number):
__lowercase :Any = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list)) | 26 | 0 |
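A minimal driver for the Prim's implementation above that avoids stdin; the toy graph and its expected spanning tree are illustrative assumptions, not part of the original script.

from collections import defaultdict

example_graph = defaultdict(list)
# Undirected weighted edges: (0-1, w=1), (1-2, w=2), (0-2, w=4)
for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 4)]:
    example_graph[u].append([v, w])
    example_graph[v].append([u, w])

# The cheapest spanning tree keeps edges 0-1 and 1-2: [(0, 1), (1, 2)]
print(prims_algorithm(example_graph))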
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)


def rename_key(key):
    # Rewrite "layers.0" style segments as "layers_0" to match Flax naming
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
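A small sanity check for the renaming rules above, run on NumPy arrays; the toy keys and shapes are assumptions made purely for illustration.

import numpy as np

# A 2D PyTorch "weight" should come back transposed as a Flax "kernel".
dummy_flax_state = {("dense", "kernel"): np.zeros((3, 4))}
flax_key, flax_tensor = rename_key_and_reshape_tensor(
    ("dense", "weight"), np.zeros((4, 3)), dummy_flax_state
)
assert flax_key == ("dense", "kernel")
assert flax_tensor.shape == (3, 4)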
| 37 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""roberta-base""": 512,
"""roberta-large""": 512,
"""roberta-large-mnli""": 512,
"""distilroberta-base""": 512,
"""roberta-base-openai-detector""": 512,
"""roberta-large-openai-detector""": 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" RoBERTa tokenizer, backed by HuggingFace's *tokenizers* library,
    using byte-level Byte-Pair-Encoding.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 37 | 1 |
"""simple docstring"""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """Copy/paste/tweak roberta_prelayernorm's weights to our BERT structure."""
    # convert configuration
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint-repo",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
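The converter can equally be called straight from Python; the checkpoint below is the one named in the argparse help text, while the output directory is an arbitrary example.

convert_roberta_prelayernorm_checkpoint_to_pytorch(
    "andreasmadsen/efficient_mlm_m0.40",  # official checkpoint from the help text
    "./roberta_prelayernorm",             # illustrative output folder
)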
| 255 |
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    """Configuration class for a UMT5 model."""

    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers


class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
| 255 | 1 |
'''simple docstring'''
def merge_sort(collection):
    """Sort by repeatedly moving the min and max of the remaining items to the ends."""
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
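Two quick illustrative checks of the sort above; the sample inputs are assumed for demonstration.

assert merge_sort([5, 1, 4, 2, 3]) == [1, 2, 3, 4, 5]
assert merge_sort([]) == []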
| 330 |
'''simple docstring'''
def equated_monthly_installments(
    principal: float, rate_per_annum: float, years_to_repay: int
) -> float:
    """
    Formula for the amortization amount per month:
    A = p * r * (1 + r)^n / ((1 + r)^n - 1)
    where p is the principal, r is the rate of interest per month
    and n is the number of payments.
    """
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
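A worked example with assumed figures: 25,000 borrowed at 8% per annum over 10 years, i.e. r = 0.08 / 12 per month and n = 120 payments.

emi = equated_monthly_installments(25_000, 0.08, 10)
print(f"{emi:.2f}")  # roughly 303.3 per month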
| 330 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )

    return config


def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name

    return name


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the timm model's weights to our BiT structure."""
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
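The same conversion can be kicked off directly from Python; the model name is the argparse default above, and the dump folder is an arbitrary example.

convert_bit_checkpoint("resnetv2_50x1_bitm", "./bit-r50x1", push_to_hub=False)  # downloads timm weights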
| 163 |
'''simple docstring'''
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 163 | 1 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
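A minimal usage sketch for the reader above, writing a throwaway text file first; the file contents are illustrative.

import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write("first line\nsecond line\n")

dataset = TextDatasetReader(f.name, keep_in_memory=True).read()
print(dataset[0])  # {'text': 'first line'}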
| 170 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 170 | 1 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"albert-base-v1": 512,
"albert-large-v1": 512,
"albert-xlarge-v1": 512,
"albert-xxlarge-v1": 512,
"albert-base-v2": 512,
"albert-large-v2": 512,
"albert-xlarge-v2": 512,
"albert-xxlarge-v2": 512,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizer(PreTrainedTokenizer):
    """Construct an ALBERT tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 712 | """simple docstring"""
import re
def split_input(str_: str) -> list:
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")
if __name__ == "__main__":
__import__('doctest').testmod()
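Illustrative conversions using the helpers above; the inputs and the outputs shown in comments are assumptions based on the implementations as written.

print(to_pascal_case("hello world"))        # HelloWorld
print(to_camel_case("hello world"))         # helloWorld
print(to_snake_case("hello world", True))   # HELLO_WORLD
print(to_kebab_case("hello world", False))  # hello-world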
| 632 | 0 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes


@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices = []
    path_list = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    """Run `layer` over `inputs` in slices of `chunk_size` along the flattened batch dims."""
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
class ChunkSizeTuner:
    def __init__(
        self,
        # Heuristically, runtimes for most of the modules in the network
        # plateau earlier than this on all GPUs I've run the model on.
        max_chunk_size: int = 512,
    ):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2

        return consistent

    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int) -> int:
        consistent = True
        arg_data = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
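A toy sketch of driving chunk_layer as reconstructed above; the layer, shapes, and chunk size are assumptions chosen purely for illustration.

def toy_layer(x: torch.Tensor) -> torch.Tensor:
    # a trivially cheap "layer" so the chunked result is easy to verify
    return x * 2


batched = {"x": torch.arange(12.0).reshape(4, 3, 1)}  # batch dims: (4, 3)
out = chunk_layer(toy_layer, batched, chunk_size=5, no_batch_dims=2)
assert out.shape == (4, 3, 1)
assert torch.equal(out, batched["x"] * 2)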
| 2 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_chinese_clip': [
'CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ChineseCLIPConfig',
'ChineseCLIPOnnxConfig',
'ChineseCLIPTextConfig',
'ChineseCLIPVisionConfig',
],
'processing_chinese_clip': ['ChineseCLIPProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
'CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ChineseCLIPModel',
'ChineseCLIPPreTrainedModel',
'ChineseCLIPTextModel',
'ChineseCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 159 | 0 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
| 415 | '''simple docstring'''
import torch
from diffusers import DiffusionPipeline
class CustomLocalPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample

        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)

        return result
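A usage sketch for the pipeline above; it only needs a UNet and a scheduler, and the checkpoint name here is purely illustrative (loading it downloads weights).

from diffusers import DDPMScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ddpm-cat-256")  # illustrative checkpoint
scheduler = DDPMScheduler()
pipeline = CustomLocalPipeline(unet, scheduler)
result = pipeline()  # a tensor of ones shaped like one UNet sample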
| 415 | 1 |
"""simple docstring"""
class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
| 76 |
"""simple docstring"""
from __future__ import annotations
def ceil_index(v: list[int], l: int, r: int, key: int) -> int:  # noqa: E741
    """Binary-search for the index of the smallest element in v[l..r] that is >= key."""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value starts a fresh candidate subsequence
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling element to keep tails as small as possible
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
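An illustrative check with an assumed sample sequence: the LIS of [2, 5, 3, 7, 11, 8, 10, 13, 6] is [2, 3, 7, 8, 10, 13], so the length is 6.

assert longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6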
| 76 | 1 |
'''simple docstring'''
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))

    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
class AcceleratorTester(AccelerateTestCase):
    @require_cuda
    def test_accelerator_can_be_reinstantiated(self):
        _ = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError):
            _ = Accelerator(cpu=True)

    def test_mutable_states(self):
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4

        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()

    def test_prepared_objects_are_referenced(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()

        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        self.assertTrue(prepared_model in accelerator._models)
        self.assertTrue(prepared_optimizer in accelerator._optimizers)
        self.assertTrue(prepared_scheduler in accelerator._schedulers)
        self.assertTrue(prepared_train_dl in accelerator._dataloaders)
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders)

    def test_free_memory_dereferences_prepared_components(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        accelerator.free_memory()

        self.assertTrue(len(accelerator._models) == 0)
        self.assertTrue(len(accelerator._optimizers) == 0)
        self.assertTrue(len(accelerator._schedulers) == 0)
        self.assertTrue(len(accelerator._dataloaders) == 0)

    def test_env_var_device(self):
        PartialState._reset_state()

        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args, **kwargs):
            pass

        with patch("torch.cuda.set_device", noop), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device), "cuda:64")
def __UpperCAmelCase ( self ) -> List[str]:
_a = Accelerator()
_a , _a , _a , _a , _a = create_components()
accelerator.prepare(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_a = get_signature(__magic_name__ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__magic_name__ )
# make sure random weights don't match
load_random_weights(__magic_name__ )
self.assertTrue(abs(model_signature - get_signature(__magic_name__ ) ) > 1e-3 )
# make sure loaded weights match
accelerator.load_state(__magic_name__ )
self.assertTrue(abs(model_signature - get_signature(__magic_name__ ) ) < 1e-3 )
def __UpperCAmelCase ( self ) -> Tuple:
_a = Accelerator()
_a , _a , _a , _a , _a = create_components()
accelerator.prepare(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_a = get_signature(__magic_name__ )
# saving hook
def save_config(__magic_name__ , __magic_name__ , __magic_name__ ):
_a = {'class_name': models[0].__class__.__name__}
with open(os.path.join(__magic_name__ , 'data.json' ) , 'w' ) as f:
json.dump(__magic_name__ , __magic_name__ )
# loading hook
def load_config(__magic_name__ , __magic_name__ ):
with open(os.path.join(__magic_name__ , 'data.json' ) , 'r' ) as f:
_a = json.load(__magic_name__ )
_a = config['class_name']
_a = accelerator.register_save_state_pre_hook(__magic_name__ )
_a = accelerator.register_load_state_pre_hook(__magic_name__ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__magic_name__ )
# make sure random weights don't match with hooks
load_random_weights(__magic_name__ )
self.assertTrue(abs(model_signature - get_signature(__magic_name__ ) ) > 1e-3 )
# random class name to verify correct one is loaded
_a = 'random'
# make sure loaded weights match with hooks
accelerator.load_state(__magic_name__ )
self.assertTrue(abs(model_signature - get_signature(__magic_name__ ) ) < 1e-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__magic_name__ )
# make sure random weights don't match with hooks removed
load_random_weights(__magic_name__ )
self.assertTrue(abs(model_signature - get_signature(__magic_name__ ) ) > 1e-3 )
# random class name to verify correct one is loaded
_a = 'random'
# make sure loaded weights match with hooks removed
accelerator.load_state(__magic_name__ )
self.assertTrue(abs(model_signature - get_signature(__magic_name__ ) ) < 1e-3 )
            # model.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def __UpperCAmelCase ( self ) -> List[str]:
_a = Accelerator()
_a , _a , _a , _a , _a = create_components()
_a = None
# This should work
_a , _a , _a , _a , _a , _a = accelerator.prepare(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
self.assertTrue(dummy_obj is None )
def __UpperCAmelCase ( self ) -> str:
_a = Accelerator()
_a , _a , _a , _a , _a = create_components()
_a = [1, 2, 3]
# This should work
_a , _a , _a , _a , _a , _a = accelerator.prepare(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
self.assertEqual(
getattr(__magic_name__ , '_is_accelerate_prepared' , __magic_name__ ) , __magic_name__ , 'Dummy object should have `_is_accelerate_prepared` set to `True`' , )
        self.assertEqual(
            getattr(__magic_name__ , '_is_accelerate_prepared' , __magic_name__ ) , __magic_name__ , 'Model is missing `_is_accelerate_prepared` or is set to `False`' , )
        self.assertEqual(
            getattr(__magic_name__ , '_is_accelerate_prepared' , __magic_name__ ) , __magic_name__ , 'Optimizer is missing `_is_accelerate_prepared` or is set to `False`' , )
        self.assertEqual(
            getattr(__magic_name__ , '_is_accelerate_prepared' , __magic_name__ ) , __magic_name__ , 'Scheduler is missing `_is_accelerate_prepared` or is set to `False`' , )
        self.assertEqual(
            getattr(__magic_name__ , '_is_accelerate_prepared' , __magic_name__ ) , __magic_name__ , 'Train Dataloader is missing `_is_accelerate_prepared` or is set to `False`' , )
        self.assertEqual(
            getattr(__magic_name__ , '_is_accelerate_prepared' , __magic_name__ ) , __magic_name__ , 'Valid Dataloader is missing `_is_accelerate_prepared` or is set to `False`' , )
@slow
@require_bnb
def __UpperCAmelCase ( self ) -> Optional[int]:
from transformers import AutoModelForCausalLM
_a = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , load_in_abit=__magic_name__ , device_map={'': 0} , )
_a = Accelerator()
# This should work
_a = accelerator.prepare(__magic_name__ )
@slow
@require_bnb
def __UpperCAmelCase ( self ) -> Optional[int]:
from transformers import AutoModelForCausalLM
_a = Accelerator()
with init_empty_weights():
_a = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , )
model.tie_weights()
_a = infer_auto_device_map(__magic_name__ )
_a = 'cpu'
_a = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , device_map=__magic_name__ , load_in_abit=__magic_name__ , llm_inta_enable_fpaa_cpu_offload=__magic_name__ )
        # This should not work and should raise a ValueError
with self.assertRaises(__magic_name__ ):
_a = accelerator.prepare(__magic_name__ )
@slow
@require_bnb
@require_multi_gpu
def __UpperCAmelCase ( self ) -> List[Any]:
from transformers import AutoModelForCausalLM
_a = {'distributed_type': DistributedType.MULTI_GPU}
with init_empty_weights():
_a = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , )
model.tie_weights()
_a = infer_auto_device_map(__magic_name__ )
_a = 1
_a = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , load_in_abit=__magic_name__ , device_map=__magic_name__ , )
_a = Accelerator()
        # This should not work and should raise a ValueError
with self.assertRaises(__magic_name__ ):
_a = accelerator.prepare(__magic_name__ )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def __UpperCAmelCase ( self ) -> Optional[int]:
from transformers import AutoModelForCausalLM
with init_empty_weights():
_a = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , )
_a = infer_auto_device_map(__magic_name__ )
_a = 1
_a = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , load_in_abit=__magic_name__ , device_map=__magic_name__ , )
_a = Accelerator()
# This should work
_a = accelerator.prepare(__magic_name__ )
@require_cuda
def __UpperCAmelCase ( self ) -> List[Any]:
_a = torch.nn.Linear(10 , 10 )
_a = torch.optim.SGD(model.parameters() , lr=0.0_1 )
_a = Accelerator(cpu=__magic_name__ )
_a = accelerator.prepare(__magic_name__ )
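# A condensed, comment-only sketch of the save/load hook protocol the tests above
# exercise (directory name illustrative): the save-state pre-hook receives
# (models, weights, output_dir), the load-state pre-hook receives (models, input_dir),
# and each register_* call returns a handle whose .remove() detaches the hook.
#   handle = accelerator.register_save_state_pre_hook(save_hook)
#   accelerator.save_state("ckpt"); accelerator.load_state("ckpt"); handle.remove()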
| 710 |
'''simple docstring'''
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class a ( _SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = """EncodecFeatureExtractor"""
_lowerCAmelCase = ("""T5Tokenizer""", """T5TokenizerFast""")
def __init__( self , __magic_name__ , __magic_name__ ) -> Optional[Any]:
super().__init__(__magic_name__ , __magic_name__ )
_a = self.feature_extractor
_a = False
def __UpperCAmelCase ( self , __magic_name__=None , __magic_name__=None , __magic_name__=True ) -> Tuple:
return self.tokenizer.get_decoder_prompt_ids(task=__magic_name__ , language=__magic_name__ , no_timestamps=__magic_name__ )
def __call__( self , *__magic_name__ , **__magic_name__ ) -> List[str]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__magic_name__ , **__magic_name__ )
_a = kwargs.pop('audio' , __magic_name__ )
_a = kwargs.pop('sampling_rate' , __magic_name__ )
_a = kwargs.pop('text' , __magic_name__ )
if len(__magic_name__ ) > 0:
_a = args[0]
_a = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if text is not None:
_a = self.tokenizer(__magic_name__ , **__magic_name__ )
if audio is not None:
_a = self.feature_extractor(__magic_name__ , *__magic_name__ , sampling_rate=__magic_name__ , **__magic_name__ )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
_a = audio_inputs['input_values']
if "padding_mask" in audio_inputs:
_a = audio_inputs['padding_mask']
return inputs
def __UpperCAmelCase ( self , *__magic_name__ , **__magic_name__ ) -> Tuple:
_a = kwargs.pop('audio' , __magic_name__ )
_a = kwargs.pop('padding_mask' , __magic_name__ )
if len(__magic_name__ ) > 0:
_a = args[0]
_a = args[1:]
if audio_values is not None:
return self._decode_audio(__magic_name__ , padding_mask=__magic_name__ )
else:
return self.tokenizer.batch_decode(*__magic_name__ , **__magic_name__ )
def __UpperCAmelCase ( self , *__magic_name__ , **__magic_name__ ) -> List[Any]:
return self.tokenizer.decode(*__magic_name__ , **__magic_name__ )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ = None ) -> List[np.ndarray]:
_a = to_numpy(__magic_name__ )
_a , _a , _a = audio_values.shape
if padding_mask is None:
return list(__magic_name__ )
_a = to_numpy(__magic_name__ )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
_a = seq_len - padding_mask.shape[-1]
_a = 1 - self.feature_extractor.padding_value
_a = np.pad(__magic_name__ , ((0, 0), (0, difference)) , 'constant' , constant_values=__magic_name__ )
_a = audio_values.tolist()
for i in range(__magic_name__ ):
_a = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
_a = sliced_audio.reshape(__magic_name__ , -1 )
return audio_values
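# A standalone numpy illustration (values invented) of the padding-mask slicing in
# _decode_audio above: samples whose mask entry equals the padding value (0 here)
# are dropped from each decoded waveform before it is returned.
import numpy as np

_audio = np.array([[0.1, 0.2, 0.3, 0.0, 0.0]])  # one waveform; last two samples are padding
_mask = np.array([[1, 1, 1, 0, 0]])             # 1 = real sample, 0 = padding
print(_audio[0][_mask[0] != 0])                 # -> [0.1 0.2 0.3]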
| 532 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""facebook/xlm-roberta-xl""": """https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json""",
"""facebook/xlm-roberta-xxl""": """https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json""",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = '''xlm-roberta-xl'''
def __init__( self : Union[str, Any] , _UpperCAmelCase : Dict=250880 , _UpperCAmelCase : List[str]=2560 , _UpperCAmelCase : Any=36 , _UpperCAmelCase : Dict=32 , _UpperCAmelCase : Optional[int]=10240 , _UpperCAmelCase : List[str]="gelu" , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : Tuple=514 , _UpperCAmelCase : str=1 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : Tuple=1e-05 , _UpperCAmelCase : str=1 , _UpperCAmelCase : Any=0 , _UpperCAmelCase : Optional[int]=2 , _UpperCAmelCase : Dict="absolute" , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Tuple=None , **_UpperCAmelCase : List[Any] , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = position_embedding_type
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = classifier_dropout
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def lowercase__ ( self : int ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
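# A standalone sketch of what the property above encodes for ONNX export: which
# tensor dimensions stay symbolic. The dict mirrors the non-multiple-choice branch;
# the torch.onnx.export usage is indicative only, not taken from this file.
from collections import OrderedDict as _OD

_dynamic_axis = {0: "batch", 1: "sequence"}
_onnx_inputs = _OD([("input_ids", _dynamic_axis), ("attention_mask", _dynamic_axis)])
print(_onnx_inputs)  # torch.onnx.export(..., dynamic_axes=dict(_onnx_inputs)) keeps these dims dynamic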
| 82 |
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
lowerCamelCase = 6_378_137.0
lowerCamelCase = 6_356_752.314_245
lowerCamelCase = 6_378_137
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
UpperCAmelCase_ = atan((1 - flattening) * tan(radians(lowerCAmelCase__ ) ) )
UpperCAmelCase_ = atan((1 - flattening) * tan(radians(lowerCAmelCase__ ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
UpperCAmelCase_ = haversine_distance(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
UpperCAmelCase_ = (b_lata + b_lata) / 2
UpperCAmelCase_ = (b_lata - b_lata) / 2
# Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2(P) * cos^2(Q) / cos^2(sigma/2)
UpperCAmelCase_ = (sin(lowerCAmelCase__ ) ** 2) * (cos(lowerCAmelCase__ ) ** 2)
UpperCAmelCase_ = cos(sigma / 2 ) ** 2
UpperCAmelCase_ = (sigma - sin(lowerCAmelCase__ )) * (x_numerator / x_demonimator)
# Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2(P) * sin^2(Q) / sin^2(sigma/2)
UpperCAmelCase_ = (cos(lowerCAmelCase__ ) ** 2) * (sin(lowerCAmelCase__ ) ** 2)
UpperCAmelCase_ = sin(sigma / 2 ) ** 2
UpperCAmelCase_ = (sigma + sin(lowerCAmelCase__ )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
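# The relative import above provides haversine_distance; for reference, a minimal
# self-contained haversine sketch (the coordinates below are illustrative, not from
# this file) computing the great-circle distance that feeds sigma:
from math import asin, sqrt

def _haversine(lata, lona, latb, lonb, radius=6_378_137.0):
    dlat, dlon = radians(latb - lata), radians(lonb - lona)
    h = sin(dlat / 2) ** 2 + cos(radians(lata)) * cos(radians(latb)) * sin(dlon / 2) ** 2
    return 2 * radius * asin(sqrt(h))

print(_haversine(37.774856, -122.424227, 37.864742, -119.537521))  # metres, roughly 254 km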
| 82 | 1 |
from pathlib import Path
import fire
from tqdm import tqdm
def _A ( __snake_case :str="ro" , __snake_case :Optional[int]="en" , __snake_case :Optional[int]="wmt16" , __snake_case :List[Any]=None ) -> None:
"""simple docstring"""
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("run pip install datasets" )
__SCREAMING_SNAKE_CASE = f'''{src_lang}-{tgt_lang}'''
print(f'''Converting {dataset}-{pair}''' )
__SCREAMING_SNAKE_CASE = datasets.load_dataset(__snake_case , __snake_case )
if save_dir is None:
__SCREAMING_SNAKE_CASE = f'''{dataset}-{pair}'''
__SCREAMING_SNAKE_CASE = Path(__snake_case )
save_dir.mkdir(exist_ok=__snake_case )
for split in ds.keys():
print(f'''Splitting {split} with {ds[split].num_rows} records''' )
        # save as val.source / val.target, matching the naming used by the summarization datasets
__SCREAMING_SNAKE_CASE = "val" if split == "validation" else split
__SCREAMING_SNAKE_CASE = save_dir.joinpath(f'''{fn}.source''' )
__SCREAMING_SNAKE_CASE = save_dir.joinpath(f'''{fn}.target''' )
__SCREAMING_SNAKE_CASE = src_path.open("w+" )
__SCREAMING_SNAKE_CASE = tgt_path.open("w+" )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
__SCREAMING_SNAKE_CASE = x["translation"]
src_fp.write(ex[src_lang] + "\n" )
tgt_fp.write(ex[tgt_lang] + "\n" )
print(f'''Saved {dataset} dataset to {save_dir}''' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
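# Hypothetical invocation via python-fire (the script name and save_dir are assumed,
# not taken from the repo):
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en
# which writes train.source/train.target, val.source/val.target and
# test.source/test.target under save_dir, one sentence per line.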
| 214 |
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ ="""new-model"""
if is_tf_available():
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ =NewModelConfig
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = "bert-base-cased"
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a, _a )
__SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained(_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a, _a )
@slow
def __lowerCAmelCase ( self ) -> Any:
__SCREAMING_SNAKE_CASE = "bert-base-cased"
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a, _a )
__SCREAMING_SNAKE_CASE = TFAutoModelForPreTraining.from_pretrained(_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a, _a )
@slow
def __lowerCAmelCase ( self ) -> Dict:
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a, _a )
__SCREAMING_SNAKE_CASE = TFAutoModelForCausalLM.from_pretrained(_a )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = TFAutoModelForCausalLM.from_pretrained(_a, output_loading_info=_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a, _a )
@slow
def __lowerCAmelCase ( self ) -> List[str]:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a, _a )
__SCREAMING_SNAKE_CASE = TFAutoModelWithLMHead.from_pretrained(_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a, _a )
@slow
def __lowerCAmelCase ( self ) -> int:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a, _a )
__SCREAMING_SNAKE_CASE = TFAutoModelForMaskedLM.from_pretrained(_a )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = TFAutoModelForMaskedLM.from_pretrained(_a, output_loading_info=_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a, _a )
@slow
def __lowerCAmelCase ( self ) -> Optional[int]:
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a, _a )
__SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained(_a )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained(_a, output_loading_info=_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a, _a )
@slow
def __lowerCAmelCase ( self ) -> Any:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a, _a )
__SCREAMING_SNAKE_CASE = TFAutoModelForSequenceClassification.from_pretrained(_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a, _a )
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a, _a )
__SCREAMING_SNAKE_CASE = TFAutoModelForQuestionAnswering.from_pretrained(_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a, _a )
@slow
@require_tensorflow_probability
def __lowerCAmelCase ( self ) -> Dict:
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a, _a )
__SCREAMING_SNAKE_CASE = TFAutoModelForTableQuestionAnswering.from_pretrained(_a )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = TFAutoModelForTableQuestionAnswering.from_pretrained(
_a, output_loading_info=_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a, _a )
def __lowerCAmelCase ( self ) -> int:
__SCREAMING_SNAKE_CASE = TFAutoModelWithLMHead.from_pretrained(_a )
self.assertIsInstance(_a, _a )
self.assertEqual(model.num_parameters(), 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_a ), 1_44_10 )
def __lowerCAmelCase ( self ) -> Tuple:
__SCREAMING_SNAKE_CASE = TFAutoModelWithLMHead.from_pretrained(_a )
self.assertIsInstance(_a, _a )
self.assertEqual(model.num_parameters(), 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_a ), 1_44_10 )
def __lowerCAmelCase ( self ) -> List[Any]:
# For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
__SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny" )
self.assertIsInstance(_a, _a )
__SCREAMING_SNAKE_CASE = copy.deepcopy(model.config )
__SCREAMING_SNAKE_CASE = ["FunnelBaseModel"]
__SCREAMING_SNAKE_CASE = TFAutoModel.from_config(_a )
self.assertIsInstance(_a, _a )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_a )
__SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained(_a )
self.assertIsInstance(_a, _a )
def __lowerCAmelCase ( self ) -> str:
try:
AutoConfig.register("new-model", _a )
__SCREAMING_SNAKE_CASE = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(_a ):
auto_class.register(_a, _a )
auto_class.register(_a, _a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
auto_class.register(_a, _a )
# Now that the config is registered, it can be used as any other config with the auto-API
__SCREAMING_SNAKE_CASE = BertModelTester(self ).get_config()
__SCREAMING_SNAKE_CASE = NewModelConfig(**tiny_config.to_dict() )
__SCREAMING_SNAKE_CASE = auto_class.from_config(_a )
self.assertIsInstance(_a, _a )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_a )
__SCREAMING_SNAKE_CASE = auto_class.from_pretrained(_a )
self.assertIsInstance(_a, _a )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def __lowerCAmelCase ( self ) -> List[str]:
with self.assertRaisesRegex(
_a, "bert-base is not a local folder and is not a valid model identifier" ):
__SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("bert-base" )
def __lowerCAmelCase ( self ) -> Tuple:
with self.assertRaisesRegex(
_a, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
__SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained(_a, revision="aaaaaa" )
def __lowerCAmelCase ( self ) -> Dict:
with self.assertRaisesRegex(
_a, "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin", ):
__SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
def __lowerCAmelCase ( self ) -> Optional[int]:
with self.assertRaisesRegex(_a, "Use `from_pt=True` to load this model" ):
__SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
def __lowerCAmelCase ( self ) -> List[Any]:
# Make sure we have cached the model.
__SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
with RequestCounter() as counter:
__SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(counter.get_request_count, 0 )
self.assertEqual(counter.head_request_count, 1 )
self.assertEqual(counter.other_request_count, 0 )
# With a sharded checkpoint
__SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
with RequestCounter() as counter:
__SCREAMING_SNAKE_CASE = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
self.assertEqual(counter.get_request_count, 0 )
self.assertEqual(counter.head_request_count, 1 )
self.assertEqual(counter.other_request_count, 0 )
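# A self-contained sketch of the register() flow walked through in the try/finally
# test above, reusing the BERT classes already imported in this module so no custom
# modeling file is needed (the "tiny-demo" model type and the tiny config values are
# invented here; this runs only with TensorFlow installed, per is_tf_available()):
class _TinyConfig(BertConfig):
    model_type = "tiny-demo"

class _TFTinyModel(TFBertModel):
    config_class = _TinyConfig

AutoConfig.register("tiny-demo", _TinyConfig)
TFAutoModel.register(_TinyConfig, _TFTinyModel)
_tiny = TFAutoModel.from_config(
    _TinyConfig(vocab_size=64, hidden_size=8, num_hidden_layers=1, num_attention_heads=2, intermediate_size=16)
)
assert isinstance(_tiny, _TFTinyModel)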
| 214 | 1 |
'''simple docstring'''
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
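# A minimal usage sketch for one of the collators re-exported above (checkpoint
# name illustrative; the default "pt" return type requires torch):
#   from transformers import AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("bert-base-uncased")
#   collator = DataCollatorWithPadding(tokenizer=tok)
#   batch = collator([tok("short"), tok("a somewhat longer sentence")])
#   batch["input_ids"].shape  # both rows padded to the longer sequence length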
| 215 |
'''simple docstring'''
def __lowerCamelCase ( __snake_case : Dict, __snake_case : Union[str, Any], __snake_case : Optional[Any], __snake_case : int, __snake_case : int, __snake_case : Tuple ) -> Dict:
"""simple docstring"""
if index == r:
for j in range(__snake_case ):
print(data[j], end=""" """ )
print(""" """ )
return
# When no more elements are there to put in data[]
if i >= n:
return
# current is included, put next at next location
A__ : Optional[int] =arr[i]
combination_util(__snake_case, __snake_case, __snake_case, index + 1, __snake_case, i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(__snake_case, __snake_case, __snake_case, __snake_case, __snake_case, i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def __lowerCamelCase ( __snake_case : Any, __snake_case : Dict, __snake_case : str ) -> str:
"""simple docstring"""
A__ : Union[str, Any] =[0] * r
# Print all combination using temporary array 'data[]'
combination_util(__snake_case, __snake_case, __snake_case, 0, __snake_case, 0 )
if __name__ == "__main__":
# Driver code to check the function above
__snake_case : List[Any] = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
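# Cross-check with the standard library: itertools.combinations should enumerate
# the same 3-element subsets, in the same lexicographic order as the
# include-then-exclude recursion above:
from itertools import combinations

for combo in combinations([10, 20, 30, 40, 50], 3):
    print(*combo)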
| 215 | 1 |
"""simple docstring"""
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__SCREAMING_SNAKE_CASE = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
__SCREAMING_SNAKE_CASE = direct_transformers_import(PATH_TO_TRANSFORMERS)
__SCREAMING_SNAKE_CASE = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__SCREAMING_SNAKE_CASE = re.compile(r'\[(.+?)\]\((https://huggingface\.co/.+?)\)')
__SCREAMING_SNAKE_CASE = {
'DecisionTransformerConfig',
'EncoderDecoderConfig',
'MusicgenConfig',
'RagConfig',
'SpeechEncoderDecoderConfig',
'TimmBackboneConfig',
'VisionEncoderDecoderConfig',
'VisionTextDualEncoderConfig',
'LlamaConfig',
}
def A_ ( __lowercase ):
UpperCamelCase_ : List[str] =None
# source code of `config_class`
UpperCamelCase_ : Any =inspect.getsource(__lowercase )
UpperCamelCase_ : Dict =_re_checkpoint.findall(__lowercase )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('/' ):
UpperCamelCase_ : Any =ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
UpperCamelCase_ : List[str] =F'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
UpperCamelCase_ : Optional[int] =ckpt_name
break
return checkpoint
def A_ ( ):
UpperCamelCase_ : List[Any] =[]
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
UpperCamelCase_ : Tuple =get_checkpoint_from_config_class(__lowercase )
UpperCamelCase_ : str =config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(__lowercase )
if len(__lowercase ) > 0:
UpperCamelCase_ : str ='\n'.join(sorted(__lowercase ) )
raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
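# A standalone probe of the checkpoint-extraction regex defined above, applied to
# an inline docstring fragment (the checkpoint name is only an example):
import re as _re

_probe = _re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
_doc = "defaults similar to [bert-base-uncased](https://huggingface.co/bert-base-uncased)"
print(_probe.findall(_doc))  # [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]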
| 395 |
"""simple docstring"""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def A_ ( __lowercase , __lowercase , __lowercase , __lowercase=False ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
if not is_sharded:
UpperCamelCase_ : Dict =os.path.abspath(__lowercase )
logger.info(F'''Loading PyTorch weights from {pt_path}''' )
UpperCamelCase_ : List[Any] =torch.load(__lowercase , map_location='cpu' )
logger.info(F'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.''' )
UpperCamelCase_ : str =convert_pytorch_state_dict_to_flax(__lowercase , __lowercase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
UpperCamelCase_ : str =convert_pytorch_sharded_state_dict_to_flax(__lowercase , __lowercase )
return flax_state_dict
def A_ ( __lowercase , __lowercase , __lowercase , __lowercase , ):
def is_key_or_prefix_key_in_dict(__lowercase ) -> bool:
return len(set(__lowercase ) & {key, (model_prefix,) + key} ) > 0
# layer norm
UpperCamelCase_ : Any =pt_tuple_key[:-1] + ('scale',)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(__lowercase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
UpperCamelCase_ : Union[str, Any] =pt_tuple_key[:-1] + ('mean',)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(__lowercase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
UpperCamelCase_ : Optional[int] =pt_tuple_key[:-1] + ('var',)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(__lowercase ):
return renamed_pt_tuple_key, pt_tensor
# embedding
UpperCamelCase_ : Union[str, Any] =pt_tuple_key[:-1] + ('embedding',)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(__lowercase ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCamelCase_ : Tuple =pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(__lowercase ):
UpperCamelCase_ : List[Any] =pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCamelCase_ : Optional[Any] =pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(__lowercase ):
UpperCamelCase_ : Union[str, Any] =pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCamelCase_ : Any =pt_tuple_key[:-1] + ('weight',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCamelCase_ : Optional[int] =pt_tuple_key[:-1] + ('bias',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
UpperCamelCase_ : Dict =None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
UpperCamelCase_ : int =pt_tuple_key[-2] + '_g'
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
UpperCamelCase_ : List[str] =pt_tuple_key[-2] + '_v'
if name is not None:
UpperCamelCase_ : Optional[int] =pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def A_ ( __lowercase , __lowercase ):
# convert pytorch tensor to numpy
UpperCamelCase_ : Optional[Any] ={k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase_ : Any =flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
UpperCamelCase_ : Dict =flax_model.params['params']
else:
UpperCamelCase_ : Union[str, Any] =flax_model.params
UpperCamelCase_ : Optional[Any] =flatten_dict(__lowercase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCamelCase_ : Any =flatten_dict(flax_model.params['batch_stats'] )
random_flax_state_dict.update(__lowercase )
UpperCamelCase_ : Optional[Any] ={}
UpperCamelCase_ : str =(model_prefix not in flax_model_params) and (
model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase_ : Optional[Any] =(model_prefix in flax_model_params) and (
model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase_ : str =tuple(pt_key.split('.' ) )
# remove base model prefix if necessary
UpperCamelCase_ : Dict =pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase_ : str =pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase_ , UpperCamelCase_ : Tuple =rename_key_and_reshape_tensor(
__lowercase , __lowercase , __lowercase , __lowercase )
# add model prefix if necessary
UpperCamelCase_ : str =(model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase_ : List[Any] =(model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
UpperCamelCase_ : str =jnp.asarray(__lowercase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(__lowercase , __lowercase )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase_ : str =jnp.asarray(__lowercase )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase_ : str =jnp.asarray(__lowercase )
return unflatten_dict(__lowercase )
def A_ ( __lowercase , __lowercase ):
import torch
    # Accumulate converted weights across all shard files
UpperCamelCase_ : List[str] ={}
for shard_file in shard_filenames:
# load using msgpack utils
UpperCamelCase_ : Dict =torch.load(__lowercase )
UpperCamelCase_ : Tuple ={k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase_ : Optional[int] =flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCamelCase_ : Union[str, Any] =flax_model.params['params']
UpperCamelCase_ : str =flatten_dict(__lowercase )
random_flax_state_dict.update(flatten_dict(flax_model.params['batch_stats'] ) )
else:
UpperCamelCase_ : Optional[Any] =flax_model.params
UpperCamelCase_ : Union[str, Any] =flatten_dict(__lowercase )
UpperCamelCase_ : List[str] =(model_prefix not in flax_model_params) and (
model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase_ : Any =(model_prefix in flax_model_params) and (
model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase_ : str =tuple(pt_key.split('.' ) )
# remove base model prefix if necessary
UpperCamelCase_ : Optional[Any] =pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase_ : Optional[int] =pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase_ , UpperCamelCase_ : Tuple =rename_key_and_reshape_tensor(
__lowercase , __lowercase , __lowercase , __lowercase )
# add model prefix if necessary
UpperCamelCase_ : Tuple =(model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase_ : int =(model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
UpperCamelCase_ : Union[str, Any] =jnp.asarray(__lowercase )
continue
if "var" in flax_key[-1]:
UpperCamelCase_ : Optional[int] =jnp.asarray(__lowercase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(__lowercase , __lowercase )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase_ : Dict =jnp.asarray(__lowercase )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase_ : int =jnp.asarray(__lowercase )
return unflatten_dict(__lowercase )
def A_ ( __lowercase , __lowercase ):
UpperCamelCase_ : str =os.path.abspath(__lowercase )
logger.info(F'''Loading Flax weights from {flax_checkpoint_path}''' )
# import correct flax class
UpperCamelCase_ : Any =getattr(__lowercase , 'Flax' + model.__class__.__name__ )
# load flax weight dict
with open(__lowercase , 'rb' ) as state_f:
try:
UpperCamelCase_ : Any =from_bytes(__lowercase , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'''Unable to convert {flax_checkpoint_path} to Flax deserializable object. ''' )
return load_flax_weights_in_pytorch_model(__lowercase , __lowercase )
def A_ ( __lowercase , __lowercase ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
# check if we have bf16 weights
UpperCamelCase_ : List[Any] =flatten_dict(jax.tree_util.tree_map(lambda __lowercase : x.dtype == jnp.bfloataa , __lowercase ) ).values()
if any(__lowercase ):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
'before loading those in PyTorch model.' )
UpperCamelCase_ : List[Any] =jax.tree_util.tree_map(
lambda __lowercase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __lowercase )
UpperCamelCase_ : str =flatten_dict(__lowercase )
UpperCamelCase_ : Union[str, Any] =pt_model.state_dict()
UpperCamelCase_ : int =(pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
UpperCamelCase_ : Optional[int] =(pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
UpperCamelCase_ : Tuple =[]
UpperCamelCase_ : Tuple =set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
UpperCamelCase_ : str =flax_key_tuple[0] == pt_model.base_model_prefix
UpperCamelCase_ : Tuple ='.'.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase_ : Union[str, Any] =flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase_ : int =(pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__lowercase ) not in pt_model_dict:
# conv layer
UpperCamelCase_ : Union[str, Any] =flax_key_tuple[:-1] + ('weight',)
UpperCamelCase_ : Optional[int] =jnp.transpose(__lowercase , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__lowercase ) not in pt_model_dict:
# linear layer
UpperCamelCase_ : str =flax_key_tuple[:-1] + ('weight',)
UpperCamelCase_ : List[Any] =flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
UpperCamelCase_ : Optional[Any] =flax_key_tuple[:-1] + ('weight',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
UpperCamelCase_ : int =flax_key_tuple[:-1] + ('running_mean',)
elif "var" in flax_key_tuple[-1]:
UpperCamelCase_ : int =flax_key_tuple[:-1] + ('running_var',)
if "batch_stats" in flax_state:
UpperCamelCase_ : Optional[Any] ='.'.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
UpperCamelCase_ : Any ='.'.join(__lowercase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
UpperCamelCase_ : Optional[Any] ={}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
UpperCamelCase_ : List[Any] =key.split('.' )
UpperCamelCase_ : Optional[Any] =None
if key_components[-3::2] == ["parametrizations", "original0"]:
UpperCamelCase_ : Optional[int] =key_components[-2] + '_g'
elif key_components[-3::2] == ["parametrizations", "original1"]:
UpperCamelCase_ : Optional[int] =key_components[-2] + '_v'
if name is not None:
UpperCamelCase_ : str =key_components[:-3] + [name]
UpperCamelCase_ : int ='.'.join(__lowercase )
UpperCamelCase_ : int =key
if flax_key in special_pt_names:
UpperCamelCase_ : str =special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
F'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
else:
# add weight to pytorch dict
UpperCamelCase_ : Any =np.asarray(__lowercase ) if not isinstance(__lowercase , np.ndarray ) else flax_tensor
UpperCamelCase_ : Any =torch.from_numpy(__lowercase )
# remove from missing keys
missing_keys.remove(__lowercase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(__lowercase )
pt_model.load_state_dict(__lowercase )
# re-transform missing_keys to list
UpperCamelCase_ : Tuple =list(__lowercase )
if len(__lowercase ) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
F''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
F''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
F''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
else:
logger.warning(F'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' )
if len(__lowercase ) > 0:
logger.warning(
F'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
F''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
' use it for predictions and inference.' )
else:
logger.warning(
F'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'''
'If your task is similar to the task the model of the checkpoint was trained on, '
F'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' )
return pt_model
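# A toy numpy illustration (shapes invented) of the layout conventions the
# converters above handle: PyTorch stores Linear weights as (out, in) and Conv2d
# weights as (out, in, kh, kw), while Flax expects (in, out) kernels and
# (kh, kw, in, out) conv kernels.
_pt_linear = np.zeros((4, 3))                # (out_features, in_features)
_flax_kernel = _pt_linear.T                  # (in_features, out_features)
_pt_conv = np.zeros((8, 3, 5, 5))            # (out, in, kh, kw)
_flax_conv = _pt_conv.transpose(2, 3, 1, 0)  # (kh, kw, in, out)
print(_flax_kernel.shape, _flax_conv.shape)  # (3, 4) (5, 5, 3, 8)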
| 395 | 1 |
def lowerCamelCase__ ( _a):
if n == 1 or not isinstance(_a , _a):
return 0
elif n == 2:
return 1
else:
SCREAMING_SNAKE_CASE : Optional[int] = [0, 1]
for i in range(2 , n + 1):
sequence.append(sequence[i - 1] + sequence[i - 2])
return sequence[n]
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : str = 0
SCREAMING_SNAKE_CASE : List[str] = 2
while digits < n:
index += 1
SCREAMING_SNAKE_CASE : Dict = len(str(fibonacci(_a)))
return index
def lowerCamelCase__ ( _a = 1000):
return fibonacci_digits_index(_a)
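# Sanity note: under the indexing above, the first Fibonacci number with three
# digits is F(12) = 144, so solution(3) evaluates to 12; Project Euler 25 asks
# for n = 1000, whose answer is index 4782.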
if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 25 |
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
__lowerCAmelCase =numpy.array([0, 0])
__lowerCAmelCase =numpy.array([0.5, 0.866_0254])
__lowerCAmelCase =numpy.array([1, 0])
__lowerCAmelCase =[VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> list[numpy.ndarray]:
"""simple docstring"""
a_ = initial_vectors
for _ in range(_UpperCAmelCase ):
a_ = iteration_step(_UpperCAmelCase )
return vectors
def a ( _UpperCAmelCase ) -> list[numpy.ndarray]:
"""simple docstring"""
a_ = []
for i, start_vector in enumerate(vectors[:-1] ):
a_ = vectors[i + 1]
new_vectors.append(_UpperCAmelCase )
a_ = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 6_0 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> numpy.ndarray:
"""simple docstring"""
a_ = numpy.radians(_UpperCAmelCase )
a_ , a_ = numpy.cos(_UpperCAmelCase ), numpy.sin(_UpperCAmelCase )
a_ = numpy.array(((c, -s), (s, c)) )
return numpy.dot(_UpperCAmelCase , _UpperCAmelCase )
def a ( _UpperCAmelCase ) -> None:
"""simple docstring"""
a_ = plt.gca()
axes.set_aspect('equal' )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
a_ , a_ = zip(*_UpperCAmelCase )
plt.plot(_UpperCAmelCase , _UpperCAmelCase )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase =iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
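    # Each iteration replaces every segment with four shorter ones, so the curve
    # drawn above (3 initial segments, 5 iterations) has 3 * 4**5 = 3072 segments.
    # A quick count of segments per step:
    for k in range(6):
        print(k, 3 * 4**k)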
| 697 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class __SCREAMING_SNAKE_CASE ( _a ):
snake_case : Union[str, Any] = """markuplm"""
def __init__( self , __lowerCAmelCase=30522 , __lowerCAmelCase=768 , __lowerCAmelCase=12 , __lowerCAmelCase=12 , __lowerCAmelCase=3072 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=512 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=1E-12 , __lowerCAmelCase=0 , __lowerCAmelCase=0 , __lowerCAmelCase=2 , __lowerCAmelCase=256 , __lowerCAmelCase=1024 , __lowerCAmelCase=216 , __lowerCAmelCase=1001 , __lowerCAmelCase=32 , __lowerCAmelCase=50 , __lowerCAmelCase="absolute" , __lowerCAmelCase=True , __lowerCAmelCase=None , **__lowerCAmelCase , ):
super().__init__(
pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase , )
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = hidden_act
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = position_embedding_type
UpperCamelCase__ = use_cache
UpperCamelCase__ = classifier_dropout
# additional properties
UpperCamelCase__ = max_depth
UpperCamelCase__ = max_xpath_tag_unit_embeddings
UpperCamelCase__ = max_xpath_subs_unit_embeddings
UpperCamelCase__ = tag_pad_id
UpperCamelCase__ = subs_pad_id
UpperCamelCase__ = xpath_unit_hidden_size
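# A minimal usage sketch (attribute names follow the assignments above; the default
# values come from the __init__ signature):
#   from transformers import MarkupLMConfig
#   cfg = MarkupLMConfig()
#   cfg.max_depth, cfg.xpath_unit_hidden_size  # -> 50, 32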
| 548 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
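# For orientation, the shape of a plain fill-mask call outside the test harness
# below (the checkpoint matches the tiny model these tests use; scores vary):
#   unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2)
#   unmasker("My name is <mask>")  # -> list of {sequence, score, token, token_str} dicts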
@is_pipeline_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
snake_case : Any = MODEL_FOR_MASKED_LM_MAPPING
snake_case : Dict = TF_MODEL_FOR_MASKED_LM_MAPPING
def _lowerCamelCase ( self ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def _lowerCamelCase ( self ):
UpperCamelCase__ = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" )
UpperCamelCase__ = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(__lowerCAmelCase , decimals=6 ) , [
{"""sequence""": """My name is grouped""", """score""": 2.1E-05, """token""": 38015, """token_str""": """ grouped"""},
{"""sequence""": """My name is accuser""", """score""": 2.1E-05, """token""": 25506, """token_str""": """ accuser"""},
] , )
UpperCamelCase__ = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(__lowerCAmelCase , decimals=6 ) , [
{
"""sequence""": """The largest city in France is grouped""",
"""score""": 2.1E-05,
"""token""": 38015,
"""token_str""": """ grouped""",
},
{
"""sequence""": """The largest city in France is accuser""",
"""score""": 2.1E-05,
"""token""": 25506,
"""token_str""": """ accuser""",
},
] , )
UpperCamelCase__ = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(__lowerCAmelCase , decimals=6 ) , [
{"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 13606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Patrick""", """score""": 2E-05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 1.9E-05, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
def _lowerCamelCase ( self ):
UpperCamelCase__ = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" )
UpperCamelCase__ = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(__lowerCAmelCase , decimals=6 ) , [
{"""sequence""": """My name is Maul""", """score""": 2.2E-05, """token""": 35676, """token_str""": """ Maul"""},
{"""sequence""": """My name isELS""", """score""": 2.2E-05, """token""": 16416, """token_str""": """ELS"""},
] , )
UpperCamelCase__ = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(__lowerCAmelCase , decimals=6 ) , [
{
"""sequence""": """The largest city in France is Maul""",
"""score""": 2.2E-05,
"""token""": 35676,
"""token_str""": """ Maul""",
},
{"""sequence""": """The largest city in France isELS""", """score""": 2.2E-05, """token""": 16416, """token_str""": """ELS"""},
] , )
UpperCamelCase__ = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(__lowerCAmelCase , decimals=6 ) , [
{"""sequence""": """My name is Patrick""", """score""": 2.1E-05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 2E-05, """token""": 2941, """token_str""": """ Te"""},
{"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 13606, """token_str""": """ Clara"""},
] , )
UpperCamelCase__ = unmasker("""My name is <mask> <mask>""" , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase , decimals=6 ) , [
[
{
"""score""": 2.2E-05,
"""token""": 35676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is Maul<mask></s>""",
},
{"""score""": 2.2E-05, """token""": 16416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""},
],
[
{
"""score""": 2.2E-05,
"""token""": 35676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is<mask> Maul</s>""",
},
{"""score""": 2.2E-05, """token""": 16416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""},
],
] , )
@require_torch_gpu
def _lowerCamelCase ( self ):
UpperCamelCase__ = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" )
# convert model to fp16
pipe.model.half()
UpperCamelCase__ = pipe("""Paris is the [MASK] of France.""" )
        # We don't actually care about the result; we just want to make sure the
        # call works, meaning the float16 tensor was cast back to float32 for
        # postprocessing.
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
@slow
@require_torch
def _lowerCamelCase ( self ):
UpperCamelCase__ = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" )
self.run_large_test(__lowerCAmelCase )
@slow
@require_tf
def _lowerCamelCase ( self ):
UpperCamelCase__ = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" )
self.run_large_test(__lowerCAmelCase )
def _lowerCamelCase ( self , __lowerCAmelCase ):
UpperCamelCase__ = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
{"""sequence""": """My name is John""", """score""": 0.008, """token""": 610, """token_str""": """ John"""},
{"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1573, """token_str""": """ Chris"""},
] , )
UpperCamelCase__ = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
{
"""sequence""": """The largest city in France is Paris""",
"""score""": 0.251,
"""token""": 2201,
"""token_str""": """ Paris""",
},
{
"""sequence""": """The largest city in France is Lyon""",
"""score""": 0.214,
"""token""": 12790,
"""token_str""": """ Lyon""",
},
] , )
UpperCamelCase__ = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
{"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 13606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
def _lowerCamelCase ( self ):
UpperCamelCase__ = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" )
UpperCamelCase__ = None
UpperCamelCase__ = None
self.run_pipeline_test(__lowerCAmelCase , [] )
@require_tf
def _lowerCamelCase ( self ):
UpperCamelCase__ = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" )
UpperCamelCase__ = None
UpperCamelCase__ = None
self.run_pipeline_test(__lowerCAmelCase , [] )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""" )
UpperCamelCase__ = FillMaskPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
UpperCamelCase__ = [
f"""This is another {tokenizer.mask_token} test""",
]
return fill_masker, examples
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = fill_masker.tokenizer
UpperCamelCase__ = fill_masker.model
UpperCamelCase__ = fill_masker(
f"""This is a {tokenizer.mask_token}""" , )
self.assertEqual(
__lowerCAmelCase , [
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
] , )
UpperCamelCase__ = fill_masker([f"""This is a {tokenizer.mask_token}"""] )
self.assertEqual(
__lowerCAmelCase , [
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
] , )
UpperCamelCase__ = fill_masker([f"""This is a {tokenizer.mask_token}""", f"""Another {tokenizer.mask_token} great test."""] )
self.assertEqual(
__lowerCAmelCase , [
[
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
],
[
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
],
] , )
with self.assertRaises(__lowerCAmelCase ):
fill_masker([None] )
        # Inputs that contain no mask token are not supported
with self.assertRaises(__lowerCAmelCase ):
fill_masker("""This is""" )
self.run_test_top_k(__lowerCAmelCase , __lowerCAmelCase )
self.run_test_targets(__lowerCAmelCase , __lowerCAmelCase )
self.run_test_top_k_targets(__lowerCAmelCase , __lowerCAmelCase )
self.fill_mask_with_duplicate_targets_and_top_k(__lowerCAmelCase , __lowerCAmelCase )
self.fill_mask_with_multiple_masks(__lowerCAmelCase , __lowerCAmelCase )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = tokenizer.get_vocab()
UpperCamelCase__ = sorted(vocab.keys() )[:2]
# Pipeline argument
UpperCamelCase__ = FillMaskPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase , targets=__lowerCAmelCase )
UpperCamelCase__ = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
__lowerCAmelCase , [
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
] , )
UpperCamelCase__ = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , __lowerCAmelCase )
UpperCamelCase__ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(__lowerCAmelCase ) )
# Call argument
UpperCamelCase__ = FillMaskPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
UpperCamelCase__ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=__lowerCAmelCase )
self.assertEqual(
__lowerCAmelCase , [
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
] , )
UpperCamelCase__ = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , __lowerCAmelCase )
UpperCamelCase__ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(__lowerCAmelCase ) )
# Score equivalence
UpperCamelCase__ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=__lowerCAmelCase )
UpperCamelCase__ = [top_mask["""token_str"""] for top_mask in outputs]
UpperCamelCase__ = [top_mask["""score"""] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(__lowerCAmelCase ) == set(__lowerCAmelCase ):
UpperCamelCase__ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=__lowerCAmelCase )
UpperCamelCase__ = [top_mask["""score"""] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(__lowerCAmelCase ) , nested_simplify(__lowerCAmelCase ) )
# Raises with invalid
with self.assertRaises(__lowerCAmelCase ):
UpperCamelCase__ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[] )
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(__lowerCAmelCase ):
UpperCamelCase__ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[""""""] )
with self.assertRaises(__lowerCAmelCase ):
UpperCamelCase__ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets="""""" )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = FillMaskPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase , top_k=2 )
UpperCamelCase__ = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
__lowerCAmelCase , [
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
] , )
UpperCamelCase__ = FillMaskPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
UpperCamelCase__ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
__lowerCAmelCase , [
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
] , )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , nested_simplify(__lowerCAmelCase ) )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = tokenizer.get_vocab()
UpperCamelCase__ = FillMaskPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
# top_k=2, ntargets=3
UpperCamelCase__ = sorted(vocab.keys() )[:3]
UpperCamelCase__ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 , targets=__lowerCAmelCase )
        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        UpperCamelCase__ = [el["""token_str"""] for el in sorted(__lowerCAmelCase , key=lambda x : x["score"] , reverse=True )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(__lowerCAmelCase ).issubset(__lowerCAmelCase ):
UpperCamelCase__ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=__lowerCAmelCase )
# They should yield exactly the same result
self.assertEqual(nested_simplify(__lowerCAmelCase ) , nested_simplify(__lowerCAmelCase ) )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = FillMaskPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
UpperCamelCase__ = tokenizer.get_vocab()
# String duplicates + id duplicates
UpperCamelCase__ = sorted(vocab.keys() )[:3]
UpperCamelCase__ = [targets[0], targets[1], targets[0], targets[2], targets[1]]
UpperCamelCase__ = fill_masker(f"""My name is {tokenizer.mask_token}""" , targets=__lowerCAmelCase , top_k=10 )
        # The target list contains duplicates, so the pipeline cannot return more
        # results than there are unique targets
self.assertEqual(len(__lowerCAmelCase ) , 3 )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = FillMaskPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
UpperCamelCase__ = fill_masker(
f"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
__lowerCAmelCase , [
[
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
],
[
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
],
[
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
],
] , )
| 548 | 1 |
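# A minimal, self-contained sketch of the pipeline behaviour exercised by the
# tests above: `top_k` caps the number of hypotheses returned per mask, and
# `targets` restricts scoring to a supplied candidate list. The checkpoint
# name is only an illustrative choice; any masked-LM checkpoint works.
from transformers import pipeline

unmasker = pipeline(task="fill-mask", model="distilroberta-base")

# Unconstrained: the two most probable fillers for the mask.
print(unmasker("My name is <mask>", top_k=2))

# Constrained: only the supplied candidate tokens are considered for ranking.
print(unmasker("My name is <mask>", targets=[" Patrick", " Clara"], top_k=2))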
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_DESCRIPTION = "\nWIKI_SPLIT is the combination of three metrics: SARI, EXACT and SACREBLEU.\nIt can be used to evaluate the quality of machine-generated texts.\n"
_KWARGS_DESCRIPTION = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def normalize_answer(text):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(text))))
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_em(predictions, references):
    # A prediction is correct if it exactly matches any of its references
    # after normalization.
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization code adapted from the EASSE library, written by the
    # authors of the ASSET dataset.
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
| 161 |
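# A small worked example of the exact-match component defined above: each
# prediction counts as correct when it matches any of its references after
# normalization (lowercasing, stripping punctuation, articles and extra
# whitespace), so the trailing " ." below does not prevent a match.
predictions = ["About 95 species are currently accepted ."]
references = [["About 95 species are currently accepted.", "Roughly 95 species are accepted."]]
print(compute_em(predictions=predictions, references=references))  # 100.0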
def infix_2_postfix(infix: str) -> str:
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if len(infix) > 7 else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                # Stop at "(" so operators inside parentheses stay on the stack
                # (without this check, priority["("] would raise a KeyError).
                while len(stack) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix: str) -> str:
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 161 | 1 |
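# Quick usage check for the two converters above. infix_2_postfix prints its
# stack/postfix trace as a side effect; only the return values are asserted.
expression = "a+b*(c^d-e)"
assert infix_2_postfix(expression) == "abcd^e-*+"
assert infix_2_prefix(expression) == "+a*b-^cde"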
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 709 |
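# Both helpers above are rearrangements of the ideal gas law P*V = n*R*T.
# Worked check with illustrative values: one mole at 300 K in 1 m^3 gives
# P = 1 * 300 * 8.314462 / 1 ≈ 2494.34 Pa, and feeding that pressure back
# in recovers the 1 m^3 volume.
print(pressure_of_gas_system(1, 300, 1))        # ~2494.3386
print(volume_of_gas_system(1, 300, 2494.3386))  # ~1.0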
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None
@dataclass(frozen=True)
class InputFeatures:
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class HansDataset(Dataset):
    features: List[InputFeatures]
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase=False , _UpperCAmelCase = False , ) -> Any:
UpperCamelCase_ = hans_processors[task]()
UpperCamelCase_ = os.path.join(
_UpperCAmelCase , 'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(_UpperCAmelCase ) , _UpperCAmelCase , ) , )
UpperCamelCase_ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCamelCase_ , UpperCamelCase_ = label_list[2], label_list[1]
UpperCamelCase_ = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCamelCase_ = cached_features_file + '.lock'
with FileLock(_UpperCAmelCase ):
if os.path.exists(_UpperCAmelCase ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
UpperCamelCase_ = torch.load(_UpperCAmelCase )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
UpperCamelCase_ = (
processor.get_dev_examples(_UpperCAmelCase ) if evaluate else processor.get_train_examples(_UpperCAmelCase )
)
logger.info('Training examples: %s' , len(_UpperCAmelCase ) )
UpperCamelCase_ = hans_convert_examples_to_features(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
logger.info('Saving features into cached file %s' , _UpperCAmelCase )
torch.save(self.features , _UpperCAmelCase )
def __len__( self ) -> Dict:
return len(self.features )
def __getitem__( self , _UpperCAmelCase ) -> InputFeatures:
return self.features[i]
def _UpperCAmelCase ( self ) -> Dict:
return self.label_list
if is_tf_available():
import tensorflow as tf
class TFHansDataset:
    features: List[InputFeatures]
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 128 , _UpperCAmelCase=False , _UpperCAmelCase = False , ) -> int:
UpperCamelCase_ = hans_processors[task]()
UpperCamelCase_ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCamelCase_ , UpperCamelCase_ = label_list[2], label_list[1]
UpperCamelCase_ = label_list
UpperCamelCase_ = processor.get_dev_examples(_UpperCAmelCase ) if evaluate else processor.get_train_examples(_UpperCAmelCase )
UpperCamelCase_ = hans_convert_examples_to_features(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='convert examples to features' ):
if ex_index % 10000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(_UpperCAmelCase )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
UpperCamelCase_ = tf.data.Dataset.from_generator(
_UpperCAmelCase , (
{
                        'example_id': tf.int32,
                        'input_ids': tf.int32,
                        'attention_mask': tf.int32,
                        'token_type_ids': tf.int32,
                    },
                    tf.int64,
) , (
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def _UpperCAmelCase ( self ) -> List[str]:
return self.dataset
def __len__( self ) -> str:
return len(self.features )
def __getitem__( self , _UpperCAmelCase ) -> InputFeatures:
return self.features[i]
def _UpperCAmelCase ( self ) -> int:
return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Tuple:
return self._create_examples(self._read_tsv(os.path.join(_UpperCAmelCase , 'heuristics_train_set.txt' ) ) , 'train' )
def _UpperCAmelCase ( self , _UpperCAmelCase ) -> Dict:
return self._create_examples(self._read_tsv(os.path.join(_UpperCAmelCase , 'heuristics_evaluation_set.txt' ) ) , 'dev' )
def _UpperCAmelCase ( self ) -> List[Any]:
return ["contradiction", "entailment", "neutral"]
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
UpperCamelCase_ = []
for i, line in enumerate(_UpperCAmelCase ):
if i == 0:
continue
UpperCamelCase_ = '%s-%s' % (set_type, line[0])
UpperCamelCase_ = line[5]
UpperCamelCase_ = line[6]
UpperCamelCase_ = line[7][2:] if line[7].startswith('ex' ) else line[7]
UpperCamelCase_ = line[0]
examples.append(InputExample(guid=_UpperCAmelCase , text_a=_UpperCAmelCase , text_b=_UpperCAmelCase , label=_UpperCAmelCase , pairID=_UpperCAmelCase ) )
return examples
def _snake_case (__lowercase , __lowercase , __lowercase , __lowercase , ):
UpperCamelCase_ = {label: i for i, label in enumerate(__lowercase)}
UpperCamelCase_ = []
for ex_index, example in tqdm.tqdm(enumerate(__lowercase) , desc='convert examples to features'):
if ex_index % 10000 == 0:
logger.info('Writing example %d' % (ex_index))
UpperCamelCase_ = tokenizer(
example.text_a , example.text_b , add_special_tokens=__lowercase , max_length=__lowercase , padding='max_length' , truncation=__lowercase , return_overflowing_tokens=__lowercase , )
UpperCamelCase_ = label_map[example.label] if example.label in label_map else 0
UpperCamelCase_ = int(example.pairID)
features.append(InputFeatures(**__lowercase , label=__lowercase , pairID=__lowercase))
for i, example in enumerate(examples[:5]):
logger.info('*** Example ***')
logger.info(f"""guid: {example}""")
logger.info(f"""features: {features[i]}""")
return features
hans_tasks_num_labels = {
    "hans": 3,
}
hans_processors = {
    "hans": HansProcessor,
}
| 618 | 0 |
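# The HansDataset above guards its feature cache with a FileLock so that, in
# distributed training, only the first process pays the featurization cost
# while the others read the cached file. A minimal sketch of the pattern in
# isolation (the cache path and the expensive build step are placeholders,
# not part of the original file):
import os

import torch
from filelock import FileLock


def load_or_build_features(cache_path, build_fn, overwrite_cache=False):
    # The lock serializes concurrent workers: the first one builds and saves,
    # the rest find the cached file and simply load it.
    with FileLock(cache_path + ".lock"):
        if os.path.exists(cache_path) and not overwrite_cache:
            return torch.load(cache_path)
        features = build_fn()
        torch.save(features, cache_path)
        return features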
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
_UpperCAmelCase : str = logging.get_logger(__name__)
_UpperCAmelCase : Any = {
'''Helsinki-NLP/opus-mt-en-de''': '''https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json''',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
UpperCamelCase__ = 'marian'
UpperCamelCase__ = ['past_key_values']
UpperCamelCase__ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , snake_case_=5_81_01 , snake_case_=None , snake_case_=10_24 , snake_case_=12 , snake_case_=40_96 , snake_case_=16 , snake_case_=12 , snake_case_=40_96 , snake_case_=16 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=True , snake_case_=True , snake_case_="gelu" , snake_case_=10_24 , snake_case_=0.1 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.02 , snake_case_=5_81_00 , snake_case_=False , snake_case_=5_81_00 , snake_case_=0 , snake_case_=0 , snake_case_=True , **snake_case_ , ):
lowercase =vocab_size
lowercase =decoder_vocab_size or vocab_size
lowercase =max_position_embeddings
lowercase =d_model
lowercase =encoder_ffn_dim
lowercase =encoder_layers
lowercase =encoder_attention_heads
lowercase =decoder_ffn_dim
lowercase =decoder_layers
lowercase =decoder_attention_heads
lowercase =dropout
lowercase =attention_dropout
lowercase =activation_dropout
lowercase =activation_function
lowercase =init_std
lowercase =encoder_layerdrop
lowercase =decoder_layerdrop
lowercase =use_cache
lowercase =encoder_layers
lowercase =scale_embedding # scale factor will be sqrt(d_model) if True
lowercase =share_encoder_decoder_embeddings
super().__init__(
pad_token_id=snake_case_ , eos_token_id=snake_case_ , is_encoder_decoder=snake_case_ , decoder_start_token_id=snake_case_ , forced_eos_token_id=snake_case_ , **snake_case_ , )
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def _A( self ):
if self.task in ["default", "seq2seq-lm"]:
lowercase =OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
lowercase ={0: '''batch'''}
lowercase ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
lowercase ={0: '''batch''', 1: '''decoder_sequence'''}
lowercase ={0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(snake_case_ , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
lowercase =OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
lowercase , lowercase =self.num_layers
for i in range(snake_case_ ):
lowercase ={0: '''batch''', 2: '''past_sequence + sequence'''}
lowercase ={0: '''batch''', 2: '''past_sequence + sequence'''}
else:
lowercase =OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def _A( self ):
if self.task in ["default", "seq2seq-lm"]:
lowercase =super().outputs
else:
lowercase =super(snake_case_ , self ).outputs
if self.use_past:
lowercase , lowercase =self.num_layers
for i in range(snake_case_ ):
lowercase ={0: '''batch''', 2: '''past_sequence + sequence'''}
lowercase ={0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def _A( self , snake_case_ , snake_case_ = -1 , snake_case_ = -1 , snake_case_ = False , snake_case_ = None , ):
lowercase =self._generate_dummy_inputs_for_encoder_and_decoder(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# Generate decoder inputs
lowercase =seq_length if not self.use_past else 1
lowercase =self._generate_dummy_inputs_for_encoder_and_decoder(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
lowercase ={f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
lowercase =dict(**snake_case_ , **snake_case_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowercase , lowercase =common_inputs['''input_ids'''].shape
lowercase =common_inputs['''decoder_input_ids'''].shape[1]
lowercase , lowercase =self.num_attention_heads
lowercase =(
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowercase =decoder_seq_length + 3
lowercase =(
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowercase =torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(snake_case_ , snake_case_ )] , dim=1 )
lowercase =[]
# If the number of encoder and decoder layers are present in the model configuration, both are considered
lowercase , lowercase =self.num_layers
lowercase =min(snake_case_ , snake_case_ )
lowercase =max(snake_case_ , snake_case_ ) - min_num_layers
lowercase ='''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(snake_case_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(snake_case_ ),
torch.zeros(snake_case_ ),
torch.zeros(snake_case_ ),
torch.zeros(snake_case_ ),
) )
# TODO: test this.
lowercase =encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(snake_case_ , snake_case_ ):
common_inputs["past_key_values"].append((torch.zeros(snake_case_ ), torch.zeros(snake_case_ )) )
return common_inputs
def _A( self , snake_case_ , snake_case_ = -1 , snake_case_ = -1 , snake_case_ = False , snake_case_ = None , ):
lowercase =self._generate_dummy_inputs_for_encoder_and_decoder(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowercase , lowercase =common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
lowercase =seqlen + 2
lowercase , lowercase =self.num_layers
lowercase , lowercase =self.num_attention_heads
lowercase =(
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowercase =common_inputs['''attention_mask'''].dtype
lowercase =torch.cat(
[common_inputs['''attention_mask'''], torch.ones(snake_case_ , snake_case_ , dtype=snake_case_ )] , dim=1 )
lowercase =[
(torch.zeros(snake_case_ ), torch.zeros(snake_case_ )) for _ in range(snake_case_ )
]
return common_inputs
def _A( self , snake_case_ , snake_case_ = -1 , snake_case_ = -1 , snake_case_ = False , snake_case_ = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase =compute_effective_axis_dimension(
snake_case_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase =tokenizer.num_special_tokens_to_add(snake_case_ )
lowercase =compute_effective_axis_dimension(
snake_case_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=snake_case_ )
# Generate dummy inputs according to compute batch and sequence
lowercase =[''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
lowercase =dict(tokenizer(snake_case_ , return_tensors=snake_case_ ) )
return common_inputs
def _A( self , snake_case_ , snake_case_ = -1 , snake_case_ = -1 , snake_case_ = False , snake_case_ = None , ):
if self.task in ["default", "seq2seq-lm"]:
lowercase =self._generate_dummy_inputs_for_default_and_seqaseq_lm(
snake_case_ , batch_size=snake_case_ , seq_length=snake_case_ , is_pair=snake_case_ , framework=snake_case_ )
else:
lowercase =self._generate_dummy_inputs_for_causal_lm(
snake_case_ , batch_size=snake_case_ , seq_length=snake_case_ , is_pair=snake_case_ , framework=snake_case_ )
return common_inputs
def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
if self.task in ["default", "seq2seq-lm"]:
lowercase =super()._flatten_past_key_values_(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
else:
lowercase =super(snake_case_ , self )._flatten_past_key_values_(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
@property
def _A( self ):
return 1E-4
| 72 |
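# Much of the dummy-input plumbing in MarianOnnxConfig exists to fabricate
# past_key_values with the right layout: one (key, value) pair of zeros per
# decoder layer, each of shape (batch, num_heads, past_seq_len, head_dim).
# A standalone sketch with illustrative dimensions (not real config values):
import torch

batch, num_heads, past_seq_len, hidden_size, num_layers = 2, 16, 9, 1024, 12
head_dim = hidden_size // num_heads

past_key_values = [
    (
        torch.zeros(batch, num_heads, past_seq_len, head_dim),
        torch.zeros(batch, num_heads, past_seq_len, head_dim),
    )
    for _ in range(num_layers)
]
print(past_key_values[0][0].shape)  # torch.Size([2, 16, 9, 64])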
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 465 | 0 |
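# The _LazyModule indirection above means importing the package only records
# which symbol lives in which submodule; the heavy torch/TF modules are
# imported on first attribute access. A minimal standalone version of the
# idea (independent of the transformers helper; names are illustrative):
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported symbol to the submodule that defines it.
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, symbol):
        module_name = self._symbol_to_module.get(symbol)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {symbol!r}")
        module = importlib.import_module(f"{self.__name__}.{module_name}")
        value = getattr(module, symbol)
        setattr(self, symbol, value)  # cache so later lookups bypass __getattr__
        return value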
import unittest
from knapsack import greedy_knapsack as kp
class TestClass(unittest.TestCase):
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        # The callable and arguments are supplied so each assertion actually
        # exercises calc_profit's validation instead of creating an unused
        # context manager.
        self.assertRaisesRegex(
            ValueError, "max_weight must greater than zero.", kp.calc_profit, [10, 20, 30], [2, 4, 6], -15
        )

    def test_negative_weight_value(self):
        self.assertRaisesRegex(
            ValueError, "Weight can not be negative.", kp.calc_profit, [10, 20, 30], [2, -4, 6], 100
        )

    def test_negative_profit_value(self):
        self.assertRaisesRegex(
            ValueError, "Profit can not be negative.", kp.calc_profit, [10, -20, 30], [2, 4, 6], 100
        )

    def test_null_max_weight(self):
        self.assertRaisesRegex(
            ValueError, "max_weight must greater than zero.", kp.calc_profit, [10, 20, 30], [2, 4, 6], 0
        )

    def test_unequal_list_length(self):
        self.assertRaisesRegex(
            ValueError, "The length of profit and weight must be same.", kp.calc_profit, [10, 20, 30, 40], [2, 4, 6], 100
        )
if __name__ == "__main__":
    unittest.main()
| 720 |
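# calc_profit itself is not reproduced in the test file above; the strategy it
# exercises is fractional greedy knapsack: take items in descending
# profit-per-weight order, splitting the last item if it does not fit whole.
# A hedged sketch — the signature and error messages are assumed from the
# tests, not copied from the library:
def greedy_knapsack_profit(profit, weight, max_weight):
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")
    items = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    total, capacity = 0.0, max_weight
    for p, w in items:
        if capacity <= 0:
            break
        take = min(w, capacity)  # whole item if possible, else the remainder
        total += p * (take / w)
        capacity -= take
    return total


# All six items (total weight 42) fit under max_weight=100, hence 210.
print(greedy_knapsack_profit([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100))  # 210.0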
def neville_interpolate(x_points: list, y_points: list, xa: float) -> list:
    """Evaluate the interpolating polynomial at xa with Neville's iterated scheme."""
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 580 | 0 |
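# Sanity check for neville_interpolate above: on collinear points the
# interpolating polynomial is the line itself, so the value at 2.5 must be 5.
value, table = neville_interpolate([1, 2, 3, 4], [2, 4, 6, 8], 2.5)
print(value)  # 5.0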