| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 87-55.2k | int64 0-349 | stringlengths 135-49.1k | int64 0-349 | int64 0-1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : int = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class __A (snake_case__):
'''simple docstring'''
__lowercase: List[Any] = """ibert"""
def __init__( self : List[Any] , UpperCAmelCase_ : int=30_522 , UpperCAmelCase_ : Any=768 , UpperCAmelCase_ : Optional[Any]=12 , UpperCAmelCase_ : Optional[Any]=12 , UpperCAmelCase_ : int=3_072 , UpperCAmelCase_ : Any="gelu" , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : int=512 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : Tuple=1E-12 , UpperCAmelCase_ : Tuple=1 , UpperCAmelCase_ : Union[str, Any]=0 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : Tuple="absolute" , UpperCAmelCase_ : int=False , UpperCAmelCase_ : Any="none" , **UpperCAmelCase_ : Any , ) ->Optional[Any]:
"""simple docstring"""
super().__init__(pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ )
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = hidden_act
snake_case_ = intermediate_size
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = position_embedding_type
snake_case_ = quant_mode
snake_case_ = force_dequant
class __A (snake_case__):
'''simple docstring'''
@property
def lowerCAmelCase ( self : str ) ->Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
snake_case_ = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
snake_case_ = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 347 |
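# Usage sketch (editor's addition, not part of the dataset row above): the
# config class above ships in the released `transformers` package as
# `IBertConfig`, so it can be instantiated directly to inspect the
# quantization-specific fields it adds on top of the standard BERT config.
from transformers import IBertConfig

config = IBertConfig(quant_mode=True, force_dequant="gelu")
assert config.model_type == "ibert"
assert config.quant_mode is True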
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE = 1_000_000 ) -> int:
snake_case_ = [i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , _SCREAMING_SNAKE_CASE ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
| 347 | 1 |
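# Quick check of the totient sieve above on small limits (editor's addition):
# phi(2)=1, phi(3)=2, phi(4)=2, phi(5)=4, so solution(5) == 1 + 2 + 2 + 4 == 9,
# and for d <= 8 (Project Euler 72's worked example) the count is 21.
assert solution(5) == 9
assert solution(8) == 21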
"""simple docstring"""
import datasets
__SCREAMING_SNAKE_CASE : Tuple = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n'
__SCREAMING_SNAKE_CASE : Dict = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'
__SCREAMING_SNAKE_CASE : List[str] = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n'
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __A (datasets.Metric):
'''simple docstring'''
def lowerCAmelCase ( self : str ) ->Any:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any ) ->int:
"""simple docstring"""
return {"accuracy": simple_accuracy(UpperCAmelCase_ , UpperCAmelCase_ )}
| 347 |
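# Note (editor's addition): `simple_accuracy` relies on element-wise `==`, so
# it expects NumPy arrays; the Metric above declares format="numpy", which is
# why `_compute` always receives arrays. Standalone use needs an explicit cast:
import numpy as np

assert simple_accuracy(np.array([0, 1, 1]), np.array([0, 1, 0])) == 2 / 3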
"""simple docstring"""
from __future__ import annotations
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
print(f"""Vertex\tShortest Distance from vertex {src}""" )
for i, d in enumerate(_SCREAMING_SNAKE_CASE ):
print(f"""{i}\t\t{d}""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
for j in range(_SCREAMING_SNAKE_CASE ):
snake_case_ , snake_case_ , snake_case_ = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
return True
return False
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> list[float]:
snake_case_ = [float("""inf""" )] * vertex_count
snake_case_ = 0.0
for _ in range(vertex_count - 1 ):
for j in range(_SCREAMING_SNAKE_CASE ):
snake_case_ , snake_case_ , snake_case_ = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
snake_case_ = distance[u] + w
snake_case_ = check_negative_cycle(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if negative_cycle_exists:
raise Exception("""Negative cycle found""" )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
__SCREAMING_SNAKE_CASE : int = int(input('Enter number of vertices: ').strip())
__SCREAMING_SNAKE_CASE : Dict = int(input('Enter number of edges: ').strip())
__SCREAMING_SNAKE_CASE : list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print('Edge ', i + 1)
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = (
int(x)
for x in input('Enter source, destination, weight: ').strip().split(' ')
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = {'src': src, 'dst': dest, 'weight': weight}
__SCREAMING_SNAKE_CASE : Union[str, Any] = int(input('\nEnter shortest path source:').strip())
__SCREAMING_SNAKE_CASE : str = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 347 | 1 |
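# Worked example (editor's addition): a 3-vertex graph with edges
# 0 -> 1 (weight 4), 0 -> 2 (weight 1), 2 -> 1 (weight 2). The indirect route
# through vertex 2 wins, so the distances from vertex 0 are [0.0, 3.0, 1.0].
example_graph = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 0, "dst": 2, "weight": 1},
    {"src": 2, "dst": 1, "weight": 2},
]
assert bellman_ford(example_graph, vertex_count=3, edge_count=3, src=0) == [0.0, 3.0, 1.0]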
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __A (unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[Any] ) ->Dict:
"""simple docstring"""
super().tearDown()
gc.collect()
def lowerCAmelCase ( self : str ) ->Any:
"""simple docstring"""
snake_case_ , snake_case_ = FlaxStableDiffusionPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , )
snake_case_ = """A painting of a squirrel eating a burger"""
snake_case_ = jax.device_count()
snake_case_ = num_samples * [prompt]
snake_case_ = sd_pipe.prepare_inputs(UpperCAmelCase_ )
snake_case_ = replicate(UpperCAmelCase_ )
snake_case_ = shard(UpperCAmelCase_ )
snake_case_ = jax.random.PRNGKey(0 )
snake_case_ = jax.random.split(UpperCAmelCase_ , jax.device_count() )
snake_case_ = sd_pipe(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , num_inference_steps=25 , jit=UpperCAmelCase_ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
snake_case_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
snake_case_ = images[0, 253:256, 253:256, -1]
snake_case_ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
snake_case_ = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] )
print(F"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def lowerCAmelCase ( self : Tuple ) ->Tuple:
"""simple docstring"""
snake_case_ = """stabilityai/stable-diffusion-2"""
snake_case_ , snake_case_ = FlaxDPMSolverMultistepScheduler.from_pretrained(UpperCAmelCase_ , subfolder="""scheduler""" )
snake_case_ , snake_case_ = FlaxStableDiffusionPipeline.from_pretrained(
UpperCAmelCase_ , scheduler=UpperCAmelCase_ , revision="""bf16""" , dtype=jnp.bfloataa , )
snake_case_ = scheduler_params
snake_case_ = """A painting of a squirrel eating a burger"""
snake_case_ = jax.device_count()
snake_case_ = num_samples * [prompt]
snake_case_ = sd_pipe.prepare_inputs(UpperCAmelCase_ )
snake_case_ = replicate(UpperCAmelCase_ )
snake_case_ = shard(UpperCAmelCase_ )
snake_case_ = jax.random.PRNGKey(0 )
snake_case_ = jax.random.split(UpperCAmelCase_ , jax.device_count() )
snake_case_ = sd_pipe(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , num_inference_steps=25 , jit=UpperCAmelCase_ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
snake_case_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
snake_case_ = images[0, 253:256, 253:256, -1]
snake_case_ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
snake_case_ = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] )
print(F"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 347 |
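# Design note (editor's addition): replicate()/shard() in the tests above are
# the standard Flax data-parallel pattern: parameters are copied to every
# device and inputs gain a leading device axis, so the jitted pipeline runs
# once per device. A minimal sketch of what shard() does to an array:
import jax
import jax.numpy as jnp
from flax.training.common_utils import shard

batch = jnp.ones((jax.device_count() * 2, 8))  # global batch
sharded = shard(batch)                         # split along a new leading device axis
assert sharded.shape == (jax.device_count(), 2, 8)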
"""simple docstring"""
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
__SCREAMING_SNAKE_CASE : List[str] = logging.getLogger(__name__)
__SCREAMING_SNAKE_CASE : str = tf.data.AUTOTUNE
def _a ( ) -> List[str]:
snake_case_ = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" )
parser.add_argument(
"""--pretrained_model_config""" , type=_SCREAMING_SNAKE_CASE , default="""roberta-base""" , help="""The model config to use. Note that we don't copy the model's weights, only the config!""" , )
parser.add_argument(
"""--tokenizer""" , type=_SCREAMING_SNAKE_CASE , default="""unigram-tokenizer-wikitext""" , help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""" , )
parser.add_argument(
"""--per_replica_batch_size""" , type=_SCREAMING_SNAKE_CASE , default=8 , help="""Batch size per TPU core.""" , )
parser.add_argument(
"""--no_tpu""" , action="""store_true""" , help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""" , )
parser.add_argument(
"""--tpu_name""" , type=_SCREAMING_SNAKE_CASE , help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""" , default="""local""" , )
parser.add_argument(
"""--tpu_zone""" , type=_SCREAMING_SNAKE_CASE , help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" , )
parser.add_argument(
"""--gcp_project""" , type=_SCREAMING_SNAKE_CASE , help="""Google cloud project name. Only used for non-Colab TPU nodes.""" )
parser.add_argument(
"""--bfloat16""" , action="""store_true""" , help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""" , )
parser.add_argument(
"""--train_dataset""" , type=_SCREAMING_SNAKE_CASE , help="""Path to training dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--shuffle_buffer_size""" , type=_SCREAMING_SNAKE_CASE , default=2**18 , help="""Size of the shuffle buffer (in samples)""" , )
parser.add_argument(
"""--eval_dataset""" , type=_SCREAMING_SNAKE_CASE , help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--num_epochs""" , type=_SCREAMING_SNAKE_CASE , default=1 , help="""Number of epochs to train for.""" , )
parser.add_argument(
"""--learning_rate""" , type=_SCREAMING_SNAKE_CASE , default=1E-4 , help="""Learning rate to use for training.""" , )
parser.add_argument(
"""--weight_decay_rate""" , type=_SCREAMING_SNAKE_CASE , default=1E-3 , help="""Weight decay rate to use for training.""" , )
parser.add_argument(
"""--max_length""" , type=_SCREAMING_SNAKE_CASE , default=512 , help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""" , )
parser.add_argument(
"""--mlm_probability""" , type=_SCREAMING_SNAKE_CASE , default=0.15 , help="""Fraction of tokens to mask during training.""" , )
parser.add_argument("""--output_dir""" , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="""Path to save model checkpoints to.""" )
parser.add_argument("""--hub_model_id""" , type=_SCREAMING_SNAKE_CASE , help="""Model ID to upload to on the Hugging Face Hub.""" )
snake_case_ = parser.parse_args()
return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu
def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        # Shard filenames encode their sample count as "...-<shard>-<count>.tfrecord"
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count
    return num_samples
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate,
        )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"], vocab_size=len(tokenizer), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=True, shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset, validation_data=eval_dataset, epochs=args.num_epochs, callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)


if __name__ == "__main__":
    args = parse_args()
    main(args)
| 347 | 1 |
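# Hypothetical invocation (editor's addition): the script filename and the
# dataset/output paths below are placeholders; only the flags come from
# parse_args() above.
#
#   python run_mlm.py \
#       --tokenizer unigram-tokenizer-wikitext \
#       --train_dataset gs://my-bucket/train \
#       --eval_dataset gs://my-bucket/eval \
#       --output_dir ./mlm-checkpoints \
#       --bfloat16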
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE = 1_000_000 ) -> int:
snake_case_ = [i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , _SCREAMING_SNAKE_CASE ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
| 347 |
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> float:
if density <= 0:
raise ValueError("""Impossible fluid density""" )
if bulk_modulus <= 0:
raise ValueError("""Impossible bulk modulus""" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 347 | 1 |
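# Worked example (editor's addition): for water, density ~998 kg/m^3 and bulk
# modulus ~2.15e9 Pa give sqrt(2.15e9 / 998) ~ 1468 m/s, close to the textbook
# ~1480 m/s figure for the speed of sound in water.
print(speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9))  # ~1467.7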
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__SCREAMING_SNAKE_CASE : List[str] = logging.getLogger(__name__)
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
return (preds == labels).mean()
@dataclass
class __A :
'''simple docstring'''
__lowercase: str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""})
__lowercase: Optional[str] = field(
default=snake_case__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""})
__lowercase: Optional[str] = field(
default=snake_case__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""})
__lowercase: Optional[str] = field(
default=snake_case__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class __A :
'''simple docstring'''
__lowercase: str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys())})
__lowercase: str = field(metadata={"""help""": """Should contain the data files for the task."""})
__lowercase: int = field(
default=1_28 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__lowercase: bool = field(
default=snake_case__ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""})
def _a ( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
snake_case_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
snake_case_ , snake_case_ , snake_case_ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , _SCREAMING_SNAKE_CASE )
# Set seed
set_seed(training_args.seed )
try:
snake_case_ = processors[data_args.task_name]()
snake_case_ = processor.get_labels()
snake_case_ = len(_SCREAMING_SNAKE_CASE )
except KeyError:
raise ValueError("""Task not found: %s""" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_SCREAMING_SNAKE_CASE , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
snake_case_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
snake_case_ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , )
# Get datasets
snake_case_ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=_SCREAMING_SNAKE_CASE , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
snake_case_ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=_SCREAMING_SNAKE_CASE , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(_SCREAMING_SNAKE_CASE ) -> Dict:
snake_case_ = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(_SCREAMING_SNAKE_CASE , p.label_ids )}
# Data collator
snake_case_ = DataCollatorWithPadding(_SCREAMING_SNAKE_CASE , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
snake_case_ = Trainer(
model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=_SCREAMING_SNAKE_CASE , eval_dataset=_SCREAMING_SNAKE_CASE , compute_metrics=_SCREAMING_SNAKE_CASE , data_collator=_SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
snake_case_ = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
snake_case_ = trainer.evaluate()
snake_case_ = os.path.join(training_args.output_dir , """eval_results.txt""" )
if trainer.is_world_master():
with open(_SCREAMING_SNAKE_CASE , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(""" %s = %s""" , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
writer.write("""%s = %s\n""" % (key, value) )
results.update(_SCREAMING_SNAKE_CASE )
return results
def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 347 |
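# Hypothetical invocation (editor's addition): the script filename, task name,
# and data paths below are placeholders; the flags come from the
# ModelArguments/DataTrainingArguments/TrainingArguments dataclasses above.
#
#   python run_multiple_choice.py \
#       --model_name_or_path roberta-base \
#       --task_name swag \
#       --data_dir ./data/swag \
#       --output_dir ./mc-checkpoints \
#       --do_train --do_eval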
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE ) -> bool:
if num < 0:
return False
snake_case_ = num
snake_case_ = 0
while num > 0:
snake_case_ = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 347 | 1 |
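# Quick checks of the digit-reversal logic above (editor's addition):
assert is_palindrome(121) is True    # 121 reversed is 121
assert is_palindrome(-121) is False  # negatives are rejected up front
assert is_palindrome(10) is False    # 10 reversed is 01, i.e. 1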
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
assert x is not None
assert y is not None
snake_case_ = len(_SCREAMING_SNAKE_CASE )
snake_case_ = len(_SCREAMING_SNAKE_CASE )
# declaring the array for storing the dp values
snake_case_ = [[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741
for i in range(1 , m + 1 ):
for j in range(1 , n + 1 ):
snake_case_ = 1 if x[i - 1] == y[j - 1] else 0
snake_case_ = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )
snake_case_ = """"""
snake_case_ , snake_case_ = m, n
while i > 0 and j > 0:
snake_case_ = 1 if x[i - 1] == y[j - 1] else 0
if l[i][j] == l[i - 1][j - 1] + match:
if match == 1:
snake_case_ = x[i - 1] + seq
i -= 1
j -= 1
elif l[i][j] == l[i - 1][j]:
i -= 1
else:
j -= 1
return l[m][n], seq
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : int = 'AGGTAB'
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'GXTXAYB'
__SCREAMING_SNAKE_CASE : Optional[Any] = 4
__SCREAMING_SNAKE_CASE : Tuple = 'GTAB'
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = longest_common_subsequence(a, b)
print('len =', ln, ', sub-sequence =', subseq)
import doctest
doctest.testmod()
| 347 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
__SCREAMING_SNAKE_CASE : Tuple = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class __A (snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Tuple = SpeechTaTokenizer
__lowercase: int = False
__lowercase: List[str] = True
def lowerCAmelCase ( self : Any ) ->str:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ = SpeechTaTokenizer(UpperCAmelCase_ )
snake_case_ = AddedToken("""<mask>""" , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ )
snake_case_ = mask_token
tokenizer.add_special_tokens({"""mask_token""": mask_token} )
tokenizer.add_tokens(["""<ctc_blank>"""] )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) ->Dict:
"""simple docstring"""
snake_case_ = """this is a test"""
snake_case_ = """this is a test"""
return input_text, output_text
def lowerCAmelCase ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Tuple=20 , UpperCAmelCase_ : Dict=5 ) ->List[Any]:
"""simple docstring"""
snake_case_ , snake_case_ = self.get_input_output_texts(UpperCAmelCase_ )
snake_case_ = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
snake_case_ = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ )
return text, ids
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)

    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."])

        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."])
    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
            "general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
            "Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
            "models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
            "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
            "conditioning on both left and right context in all layers.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        # fmt: off
        expected_encoding = {
            "input_ids": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
| 347 | 1 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
# Load configuration defined in the metadata file
with open(_SCREAMING_SNAKE_CASE ) as metadata_file:
snake_case_ = json.load(_SCREAMING_SNAKE_CASE )
snake_case_ = LukeConfig(use_entity_aware_attention=_SCREAMING_SNAKE_CASE , **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
snake_case_ = torch.load(_SCREAMING_SNAKE_CASE , map_location="""cpu""" )
# Load the entity vocab file
snake_case_ = load_entity_vocab(_SCREAMING_SNAKE_CASE )
snake_case_ = RobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
snake_case_ = AddedToken("""<ent>""" , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE )
snake_case_ = AddedToken("""<ent2>""" , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
with open(os.path.join(_SCREAMING_SNAKE_CASE , LukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = LukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
# Initialize the embeddings of the special tokens
snake_case_ = state_dict["""embeddings.word_embeddings.weight"""]
snake_case_ = word_emb[tokenizer.convert_tokens_to_ids(["""@"""] )[0]].unsqueeze(0 )
snake_case_ = word_emb[tokenizer.convert_tokens_to_ids(["""#"""] )[0]].unsqueeze(0 )
snake_case_ = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
snake_case_ = f"""encoder.layer.{layer_index}.attention.self."""
snake_case_ = state_dict[prefix + matrix_name]
snake_case_ = state_dict[prefix + matrix_name]
snake_case_ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
snake_case_ = state_dict["""entity_embeddings.entity_embeddings.weight"""]
snake_case_ = entity_emb[entity_vocab["""[MASK]"""]]
snake_case_ = LukeModel(config=_SCREAMING_SNAKE_CASE ).eval()
snake_case_ , snake_case_ = model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE )
if not (len(_SCREAMING_SNAKE_CASE ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(f"""Missing keys {", ".join(_SCREAMING_SNAKE_CASE )}. Expected only missing embeddings.position_ids""" )
if not (all(key.startswith("""entity_predictions""" ) or key.startswith("""lm_head""" ) for key in unexpected_keys )):
raise ValueError(
"""Unexpected keys"""
f""" {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}""" )
# Check outputs
snake_case_ = LukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE , task="""entity_classification""" )
snake_case_ = (
"""Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"""
""" new world number one avoid a humiliating second- round exit at Wimbledon ."""
)
snake_case_ = (39, 42)
snake_case_ = tokenizer(_SCREAMING_SNAKE_CASE , entity_spans=[span] , add_prefix_space=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
snake_case_ = model(**_SCREAMING_SNAKE_CASE )
# Verify word hidden states
if model_size == "large":
snake_case_ = torch.Size((1, 42, 1_024) )
snake_case_ = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
else: # base
snake_case_ = torch.Size((1, 42, 768) )
snake_case_ = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
snake_case_ = torch.Size((1, 1, 1_024) )
snake_case_ = torch.tensor([[0.0466, -0.0106, -0.0179]] )
else: # base
snake_case_ = torch.Size((1, 1, 768) )
snake_case_ = torch.tensor([[0.1457, 0.1044, 0.0174]] )
if not (outputs.entity_last_hidden_state.shape != expected_shape):
raise ValueError(
f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
f""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(_SCREAMING_SNAKE_CASE ) )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]:
snake_case_ = {}
with open(_SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" ) as f:
for index, line in enumerate(_SCREAMING_SNAKE_CASE ):
snake_case_ , snake_case_ = line.rstrip().split("""\t""" )
snake_case_ = index
return entity_vocab
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 347 |
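# Hypothetical invocation (editor's addition): the script filename and all
# paths below are placeholders; the flags are exactly those registered by the
# argparse block above.
#
#   python convert_luke_checkpoint.py \
#       --checkpoint_path ./luke_base/pytorch_model.bin \
#       --metadata_path ./luke_base/metadata.json \
#       --entity_vocab_path ./luke_base/entity_vocab.tsv \
#       --pytorch_dump_folder_path ./converted_luke \
#       --model_size base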
"""simple docstring"""
import datasets
__SCREAMING_SNAKE_CASE : Tuple = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n'
__SCREAMING_SNAKE_CASE : Dict = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'
__SCREAMING_SNAKE_CASE : List[str] = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n'
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __A (datasets.Metric):
'''simple docstring'''
def lowerCAmelCase ( self : str ) ->Any:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any ) ->int:
"""simple docstring"""
return {"accuracy": simple_accuracy(UpperCAmelCase_ , UpperCAmelCase_ )}
| 347 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    """You can also import this e.g. from .test_modeling_funnel import FunnelModelTester"""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size, block_sizes=self.block_sizes, num_decoder_layers=self.num_decoder_layers, d_model=self.d_model, n_head=self.n_head, d_head=self.d_head, d_inner=self.d_inner, hidden_act=self.hidden_act, hidden_dropout=self.hidden_dropout, attention_dropout=self.attention_dropout, activation_dropout=self.activation_dropout, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_std=self.initializer_std,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , ) ->str:
"""simple docstring"""
snake_case_ = TFFunnelModel(config=UpperCAmelCase_ )
snake_case_ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
snake_case_ = model(UpperCAmelCase_ )
snake_case_ = [input_ids, input_mask]
snake_case_ = model(UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
        config.truncate_seq = False
snake_case_ = TFFunnelModel(config=UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
        config.separate_cls = False
snake_case_ = TFFunnelModel(config=UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str , ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = TFFunnelBaseModel(config=UpperCAmelCase_ )
snake_case_ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
snake_case_ = model(UpperCAmelCase_ )
snake_case_ = [input_ids, input_mask]
snake_case_ = model(UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
        config.truncate_seq = False
snake_case_ = TFFunnelBaseModel(config=UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
        config.separate_cls = False
snake_case_ = TFFunnelBaseModel(config=UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def lowerCAmelCase ( self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , ) ->List[Any]:
"""simple docstring"""
snake_case_ = TFFunnelForPreTraining(config=UpperCAmelCase_ )
snake_case_ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple , ) ->Tuple:
"""simple docstring"""
snake_case_ = TFFunnelForMaskedLM(config=UpperCAmelCase_ )
snake_case_ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Any , ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = self.num_labels
snake_case_ = TFFunnelForSequenceClassification(config=UpperCAmelCase_ )
snake_case_ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = self.num_choices
snake_case_ = TFFunnelForMultipleChoice(config=UpperCAmelCase_ )
snake_case_ = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
snake_case_ = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
snake_case_ = tf.tile(tf.expand_dims(UpperCAmelCase_ , 1 ) , (1, self.num_choices, 1) )
snake_case_ = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , ) ->str:
"""simple docstring"""
snake_case_ = self.num_labels
snake_case_ = TFFunnelForTokenClassification(config=UpperCAmelCase_ )
snake_case_ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : int , ) ->int:
"""simple docstring"""
snake_case_ = TFFunnelForQuestionAnswering(config=UpperCAmelCase_ )
snake_case_ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : Optional[Any] ) ->str:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class __A (snake_case__ , snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Union[str, Any] = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
__lowercase: Any = (
{
"""feature-extraction""": (TFFunnelBaseModel, TFFunnelModel),
"""fill-mask""": TFFunnelForMaskedLM,
"""question-answering""": TFFunnelForQuestionAnswering,
"""text-classification""": TFFunnelForSequenceClassification,
"""token-classification""": TFFunnelForTokenClassification,
"""zero-shot""": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
__lowercase: Optional[Any] = False
__lowercase: Any = False
def lowerCAmelCase ( self : int ) ->str:
"""simple docstring"""
snake_case_ = TFFunnelModelTester(self )
snake_case_ = ConfigTester(self , config_class=UpperCAmelCase_ )
def lowerCAmelCase ( self : int ) ->Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Tuple ) ->List[Any]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowerCAmelCase ( self : str ) ->Dict:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase_ )
def lowerCAmelCase ( self : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ )
def lowerCAmelCase ( self : str ) ->Dict:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )
def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ )
@require_tf
class __A (snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Any = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
__lowercase: Dict = False
__lowercase: str = False
def lowerCAmelCase ( self : Tuple ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = TFFunnelModelTester(self , base=UpperCAmelCase_ )
snake_case_ = ConfigTester(self , config_class=UpperCAmelCase_ )
def lowerCAmelCase ( self : List[Any] ) ->int:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : List[str] ) ->List[Any]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*UpperCAmelCase_ )
def lowerCAmelCase ( self : Union[str, Any] ) ->Tuple:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] ) ->Optional[int]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase_ )
| 347 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: List[Any] = ["""sentencepiece"""]
def __init__( self : int , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[str] ) ->List[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Optional[int] = ["""sentencepiece"""]
def __init__( self : Optional[int] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Tuple ) ->Dict:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Any = ["""sentencepiece"""]
def __init__( self : Any , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ) ->List[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Dict = ["""sentencepiece"""]
def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int ) ->Optional[int]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: List[str] = ["""sentencepiece"""]
def __init__( self : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ) ->List[str]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict ) ->Optional[int]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : int , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Union[str, Any] ) ->Dict:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Optional[Any] = ["""sentencepiece"""]
def __init__( self : List[str] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Optional[Any] ) ->Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Optional[int] = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[Any] ) ->Tuple:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Any = ["""sentencepiece"""]
def __init__( self : List[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : int ) ->List[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Any = ["""sentencepiece"""]
def __init__( self : int , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[Any] ) ->Tuple:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : Optional[int] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Tuple = ["""sentencepiece"""]
def __init__( self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ) ->Dict:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Union[str, Any] = ["""sentencepiece"""]
def __init__( self : List[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[Any] ) ->Optional[int]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : int , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[str] ) ->Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Tuple = ["""sentencepiece"""]
def __init__( self : int , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Optional[Any] ) ->str:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: List[Any] = ["""sentencepiece"""]
def __init__( self : Dict , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[Any] ) ->int:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Union[str, Any] = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int] ) ->Optional[int]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Any = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict ) ->Dict:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Any = ["""sentencepiece"""]
def __init__( self : List[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[str] ) ->List[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[Any] ) ->List[str]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[Any] ) ->Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: List[Any] = ["""sentencepiece"""]
def __init__( self : Any , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[Any] ) ->List[str]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Dict = ["""sentencepiece"""]
def __init__( self : int , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Union[str, Any] ) ->List[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: List[Any] = ["""sentencepiece"""]
def __init__( self : List[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Union[str, Any] = ["""sentencepiece"""]
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : str ) ->Optional[int]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Optional[int] = ["""sentencepiece"""]
def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[int] ) ->Dict:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Dict = ["""sentencepiece"""]
def __init__( self : Optional[int] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[str] ) ->Optional[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int] ) ->Any:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: List[str] = ["""sentencepiece"""]
def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Any = ["""sentencepiece"""]
def __init__( self : List[str] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int] ) ->str:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
| 347 | 1 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq( number ) -> bool:
    sq = int(number**0.5 )
    return number == sq * sq
def add_three( x_num , x_den , y_num , y_den , z_num , z_den ) -> tuple[int, int]:
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top , bottom )
    top //= hcf
    bottom //= hcf
    return top, bottom
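# Added worked example: for 1/2 + 1/3 + 1/6, top = 1*3*6 + 1*2*6 + 1*2*3 = 36 and
# bottom = 2*3*6 = 36, so add_three(1, 2, 1, 3, 1, 6) reduces to (1, 1).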
def solution( order = 35 ) -> int:
    unique_s = set()
    total = Fraction(0 )
    for x_num in range(1 , order + 1 ):
        for x_den in range(x_num + 1 , order + 1 ):
            for y_num in range(1 , order + 1 ):
                for y_den in range(y_num + 1 , order + 1 ):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )
    for num, den in unique_s:
        total += Fraction(num , den )
    return total.denominator + total.numerator
if __name__ == "__main__":
print(f"""{solution() = }""")
| 347 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
__SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
class __A (snake_case__):
'''simple docstring'''
def __init__( self : str , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int ) ->None:
"""simple docstring"""
warnings.warn(
"""The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use MobileViTImageProcessor instead.""" , UpperCAmelCase_ , )
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
| 347 | 1 |
"""simple docstring"""
Vectorad = tuple[float, float, float]
Pointad = tuple[float, float, float]
def create_vector( end_pointa: Pointad , end_pointb: Pointad ) -> Vectorad:
    x = end_pointb[0] - end_pointa[0]
    y = end_pointb[1] - end_pointa[1]
    z = end_pointb[2] - end_pointa[2]
    return (x, y, z)
def get_ad_vectors_cross( ab: Vectorad , ac: Vectorad ) -> Vectorad:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)
def is_zero_vector( vector: Vectorad , accuracy: int ) -> bool:
    return tuple(round(x , accuracy ) for x in vector ) == (0, 0, 0)
def are_collinear( a: Pointad , b: Pointad , c: Pointad , accuracy: int = 10 ) -> bool:
    ab = create_vector(a , b )
    ac = create_vector(a , c )
    return is_zero_vector(get_ad_vectors_cross(ab , ac ) , accuracy )
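# Minimal usage sketch (an addition, not part of the original module): three
# points on the line x = y = z are collinear, so the cross product of AB and AC
# is the zero vector and the check returns True.
if __name__ == "__main__":
    print(are_collinear((0.0, 0.0, 0.0), (1.0, 1.0, 1.0), (2.0, 2.0, 2.0)))  # True
    print(are_collinear((0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0)))  # False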
| 347 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Any:
snake_case_ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
snake_case_ = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
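# Added illustration: each pair rewrites a flat timm key into the nested
# HuggingFace layout, e.g. for i = 0 the first pair maps
# "blocks.0.norm1.weight" -> "vit.encoder.layer.0.layernorm_before.weight".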
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Tuple:
for i in range(config.num_hidden_layers ):
if base_model:
snake_case_ = """"""
else:
snake_case_ = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case_ = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
snake_case_ = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
snake_case_ = in_proj_weight[
: config.hidden_size, :
]
snake_case_ = in_proj_bias[: config.hidden_size]
snake_case_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case_ = in_proj_weight[
-config.hidden_size :, :
]
snake_case_ = in_proj_bias[-config.hidden_size :]
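# Added sketch (hypothetical shapes, not part of the conversion script): the fused
# timm qkv weight of shape (3 * hidden, hidden) stacks query, key and value
# row-wise, so the three slices above recover them, e.g.
#
#   import torch
#   hidden = 4
#   qkv = torch.arange(3 * hidden * hidden).reshape(3 * hidden, hidden)
#   query, key, value = qkv[:hidden], qkv[hidden : 2 * hidden], qkv[-hidden:]
#   assert torch.equal(torch.cat([query, key, value]), qkv)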
def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]:
snake_case_ = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
snake_case_ = dct.pop(_SCREAMING_SNAKE_CASE )
snake_case_ = val
def _a ( ) -> Any:
snake_case_ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case_ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
snake_case_ = ViTConfig()
snake_case_ = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
snake_case_ = True
snake_case_ = int(vit_name[-12:-10] )
snake_case_ = int(vit_name[-9:-6] )
else:
snake_case_ = 1_000
snake_case_ = """huggingface/label-files"""
snake_case_ = """imagenet-1k-id2label.json"""
snake_case_ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) )
snake_case_ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
snake_case_ = idalabel
snake_case_ = {v: k for k, v in idalabel.items()}
snake_case_ = int(vit_name[-6:-4] )
snake_case_ = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("""tiny""" ):
snake_case_ = 192
snake_case_ = 768
snake_case_ = 12
snake_case_ = 3
elif vit_name[9:].startswith("""small""" ):
snake_case_ = 384
snake_case_ = 1_536
snake_case_ = 12
snake_case_ = 6
else:
pass
else:
if vit_name[4:].startswith("""small""" ):
snake_case_ = 768
snake_case_ = 2_304
snake_case_ = 8
snake_case_ = 8
elif vit_name[4:].startswith("""base""" ):
pass
elif vit_name[4:].startswith("""large""" ):
snake_case_ = 1_024
snake_case_ = 4_096
snake_case_ = 24
snake_case_ = 16
elif vit_name[4:].startswith("""huge""" ):
snake_case_ = 1_280
snake_case_ = 5_120
snake_case_ = 32
snake_case_ = 16
# load original model from timm
snake_case_ = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
snake_case_ = timm_model.state_dict()
if base_model:
remove_classification_head_(_SCREAMING_SNAKE_CASE )
snake_case_ = create_rename_keys(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
read_in_q_k_v(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# load HuggingFace model
if vit_name[-5:] == "in21k":
snake_case_ = ViTModel(_SCREAMING_SNAKE_CASE ).eval()
else:
snake_case_ = ViTForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
model.load_state_dict(_SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
snake_case_ = DeiTImageProcessor(size=config.image_size )
else:
snake_case_ = ViTImageProcessor(size=config.image_size )
snake_case_ = image_processor(images=prepare_img() , return_tensors="""pt""" )
snake_case_ = encoding["""pixel_values"""]
snake_case_ = model(_SCREAMING_SNAKE_CASE )
if base_model:
snake_case_ = timm_model.forward_features(_SCREAMING_SNAKE_CASE )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_SCREAMING_SNAKE_CASE , outputs.pooler_output , atol=1E-3 )
else:
snake_case_ = timm_model(_SCREAMING_SNAKE_CASE )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_SCREAMING_SNAKE_CASE , outputs.logits , atol=1E-3 )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 347 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __A (snake_case__ , unittest.TestCase):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class __A (unittest.TestCase):
'''simple docstring'''
@property
def lowerCAmelCase ( self : List[Any] ) ->Tuple:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
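    # Added note: onnxruntime accepts providers either as plain strings or as
    # (name, options) tuples; "arena_extend_strategy": "kSameAsRequested" grows the
    # CUDA memory arena only by the amount actually requested instead of doubling it.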
@property
def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]:
"""simple docstring"""
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
return options
def lowerCAmelCase ( self : Union[str, Any] ) ->List[str]:
"""simple docstring"""
snake_case_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
snake_case_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
snake_case_ = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , safety_checker=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
snake_case_ = """A red cat sitting on a park bench"""
snake_case_ = np.random.RandomState(0 )
snake_case_ = pipe(
prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , mask_image=UpperCAmelCase_ , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCAmelCase_ , output_type="""np""" , )
snake_case_ = output.images
snake_case_ = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
snake_case_ = np.array([0.2_514, 0.3_007, 0.3_517, 0.1_790, 0.2_382, 0.3_167, 0.1_944, 0.2_273, 0.2_464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase ( self : int ) ->Tuple:
"""simple docstring"""
snake_case_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
snake_case_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
snake_case_ = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , subfolder="""scheduler""" , revision="""onnx""" )
snake_case_ = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , scheduler=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
snake_case_ = """A red cat sitting on a park bench"""
snake_case_ = np.random.RandomState(0 )
snake_case_ = pipe(
prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , mask_image=UpperCAmelCase_ , guidance_scale=7.5 , num_inference_steps=20 , generator=UpperCAmelCase_ , output_type="""np""" , )
snake_case_ = output.images
snake_case_ = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
snake_case_ = np.array([0.0_086, 0.0_077, 0.0_083, 0.0_093, 0.0_107, 0.0_139, 0.0_094, 0.0_097, 0.0_125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 347 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class __A (unittest.TestCase):
'''simple docstring'''
def __init__( self : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple=13 , UpperCAmelCase_ : List[Any]=7 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Dict=99 , UpperCAmelCase_ : str=32 , UpperCAmelCase_ : Tuple=5 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : Any=37 , UpperCAmelCase_ : int="gelu" , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Dict=512 , UpperCAmelCase_ : Optional[Any]=16 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : str=4 , ) ->Tuple:
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_attention_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_choices
def lowerCAmelCase ( self : Optional[int] ) ->str:
"""simple docstring"""
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_attention_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase ( self : List[str] ) ->Dict:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
        return config, inputs_dict
@require_flax
class __A (snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Union[str, Any] = True
__lowercase: int = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
snake_case_ = FlaxRoFormerModelTester(self )
@slow
def lowerCAmelCase ( self : Any ) ->List[str]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
snake_case_ = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=UpperCAmelCase_ )
snake_case_ = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase_ )
@require_flax
class __A (unittest.TestCase):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : str ) ->Dict:
"""simple docstring"""
snake_case_ = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
snake_case_ = jnp.array([[0, 1, 2, 3, 4, 5]] )
snake_case_ = model(UpperCAmelCase_ )[0]
snake_case_ = 50_000
snake_case_ = (1, 6, vocab_size)
self.assertEqual(output.shape , UpperCAmelCase_ )
snake_case_ = jnp.array(
[[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
| 347 | 1 |
"""simple docstring"""
class __A :
'''simple docstring'''
def __init__( self : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = name
snake_case_ = val
    def __str__( self : List[str] ) ->str:
"""simple docstring"""
return F"""{self.__class__.__name__}({self.name}, {self.val})"""
def __lt__( self : Dict , UpperCAmelCase_ : Optional[Any] ) ->str:
"""simple docstring"""
return self.val < other.val
class __A :
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCAmelCase_ : str ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = {}
snake_case_ = {}
snake_case_ = self.build_heap(UpperCAmelCase_ )
def __getitem__( self : str , UpperCAmelCase_ : Optional[Any] ) ->int:
"""simple docstring"""
return self.get_value(UpperCAmelCase_ )
def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
return (idx - 1) // 2
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : List[Any] ) ->Optional[Any]:
"""simple docstring"""
return idx * 2 + 1
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : List[str] ) ->Dict:
"""simple docstring"""
return idx * 2 + 2
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : List[str] ) ->Optional[int]:
"""simple docstring"""
return self.heap_dict[key]
def lowerCAmelCase ( self : Any , UpperCAmelCase_ : Optional[int] ) ->List[Any]:
"""simple docstring"""
        last_idx = len(UpperCAmelCase_ ) - 1
        start_from = self.get_parent_idx(last_idx )
        for idx, i in enumerate(UpperCAmelCase_ ):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from , -1 , -1 ):
            self.sift_down(i , UpperCAmelCase_ )
        return UpperCAmelCase_
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple ) ->Optional[int]:
"""simple docstring"""
while True:
            l = self.get_left_child_idx(idx )  # noqa: E741
            r = self.get_right_child_idx(idx )
            smallest = idx
if l < len(UpperCAmelCase_ ) and array[l] < array[idx]:
snake_case_ = l
if r < len(UpperCAmelCase_ ) and array[r] < array[smallest]:
snake_case_ = r
if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
else:
break
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : List[str] ) ->Tuple:
"""simple docstring"""
        p = self.get_parent_idx(idx )
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx )
def lowerCAmelCase ( self : Dict ) ->List[str]:
"""simple docstring"""
return self.heap[0]
def lowerCAmelCase ( self : int ) ->str:
"""simple docstring"""
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap )
return x
def lowerCAmelCase ( self : Any , UpperCAmelCase_ : Optional[int] ) ->Any:
"""simple docstring"""
        self.heap.append(UpperCAmelCase_ )
        self.idx_of_element[UpperCAmelCase_] = len(self.heap ) - 1
        self.heap_dict[UpperCAmelCase_.name] = UpperCAmelCase_.val
self.sift_up(len(self.heap ) - 1 )
def lowerCAmelCase ( self : Union[str, Any] ) ->List[Any]:
"""simple docstring"""
return len(self.heap ) == 0
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] ) ->Optional[int]:
"""simple docstring"""
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "new_value must be less than the current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
self.sift_up(self.idx_of_element[node] )
__SCREAMING_SNAKE_CASE : int = Node('R', -1)
__SCREAMING_SNAKE_CASE : Optional[Any] = Node('B', 6)
__SCREAMING_SNAKE_CASE : Optional[int] = Node('A', 3)
__SCREAMING_SNAKE_CASE : Union[str, Any] = Node('X', 1)
__SCREAMING_SNAKE_CASE : Any = Node('E', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
__SCREAMING_SNAKE_CASE : Optional[int] = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 347 |
"""simple docstring"""
from __future__ import annotations
def kmp( pattern , text ) -> bool:
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern )
    # 2) Step through text searching for pattern
    i , j = 0, 0  # index into text, pattern
    while i < len(text ):
        if pattern[j] == text[i]:
            if j == (len(pattern ) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def get_failure_array( pattern ) -> list[int]:
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern ):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i )
    return failure
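# Worked trace (added, not in the original): for the pattern "ABABX" used in
# Test 2 below, the failure array records, at each position, the length of the
# longest proper prefix that is also a suffix:
#   prefix:    A   AB   ABA   ABAB   ABABX
#   failure:  [0,   0,    1,     2,      0]
assert get_failure_array("ABABX") == [0, 0, 1, 2, 0]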
if __name__ == "__main__":
# Test 1)
__SCREAMING_SNAKE_CASE : Optional[int] = 'abc1abc12'
__SCREAMING_SNAKE_CASE : Optional[int] = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
__SCREAMING_SNAKE_CASE : List[str] = 'alskfjaldsk23adsfabcabc'
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
__SCREAMING_SNAKE_CASE : int = 'ABABX'
__SCREAMING_SNAKE_CASE : Optional[Any] = 'ABABZABABYABABX'
assert kmp(pattern, text)
# Test 3)
__SCREAMING_SNAKE_CASE : Any = 'AAAB'
__SCREAMING_SNAKE_CASE : List[Any] = 'ABAAAAAB'
assert kmp(pattern, text)
# Test 4)
__SCREAMING_SNAKE_CASE : Optional[int] = 'abcdabcy'
__SCREAMING_SNAKE_CASE : str = 'abcxabcdabxabcdabcdabcy'
assert kmp(pattern, text)
# Test 5)
__SCREAMING_SNAKE_CASE : Any = 'aabaabaaa'
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 347 | 1 |
"""simple docstring"""
import math
import unittest
def is_prime( number ) -> bool:
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All remaining primes are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
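# Added sanity checks (not in the original): exercise the 6k +/- 1 trial division.
assert is_prime(97)  # sqrt(97) ~ 9.85; neither 5 nor 7 divides 97
assert not is_prime(91)  # 91 = 7 * 13, caught at i = 5 via i + 2 == 7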
class __A (unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase ( self : List[str] ) ->List[str]:
"""simple docstring"""
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def lowerCAmelCase ( self : int ) ->Union[str, Any]:
"""simple docstring"""
with self.assertRaises(UpperCAmelCase_ ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 347 |
"""simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __A (snake_case__):
'''simple docstring'''
@slow
@require_torch
def lowerCAmelCase ( self : Union[str, Any] ) ->Dict:
"""simple docstring"""
snake_case_ = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
snake_case_ = BertTokenizer.from_pretrained("""bert-base-uncased""" )
snake_case_ = bertabert.config.encoder.vocab_size
snake_case_ = tokenizer.sep_token_id
snake_case_ = tokenizer.cls_token_id
snake_case_ = 128
snake_case_ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
snake_case_ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
snake_case_ = train_dataset.select(range(32 ) )
snake_case_ = val_dataset.select(range(16 ) )
snake_case_ = 4
def _map_to_encoder_decoder_inputs(UpperCAmelCase_ : int ):
# Tokenizer will automatically set [BOS] <text> [EOS]
snake_case_ = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=UpperCAmelCase_ , max_length=512 )
snake_case_ = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=UpperCAmelCase_ , max_length=128 )
snake_case_ = inputs.input_ids
snake_case_ = inputs.attention_mask
snake_case_ = outputs.input_ids
snake_case_ = outputs.input_ids.copy()
snake_case_ = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
snake_case_ = outputs.attention_mask
assert all(len(UpperCAmelCase_ ) == 512 for x in inputs.input_ids )
assert all(len(UpperCAmelCase_ ) == 128 for x in outputs.input_ids )
return batch
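        # Added note (not in the original test): label positions set to -100 are
        # ignored by PyTorch's cross-entropy loss, so [PAD] tokens in the padded
        # summaries do not contribute to the seq2seq training loss, e.g.
        #   labels = [101, 2023, 102, 0, 0]  ->  [101, 2023, 102, -100, -100]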
def _compute_metrics(UpperCAmelCase_ : Union[str, Any] ):
snake_case_ = pred.label_ids
snake_case_ = pred.predictions
# all unnecessary tokens are removed
snake_case_ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
snake_case_ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
snake_case_ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCAmelCase_ ) )] ) / len(UpperCAmelCase_ )
return {"accuracy": accuracy}
# map train dataset
snake_case_ = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , remove_columns=["""article""", """highlights"""] , )
train_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
# same for validation dataset
snake_case_ = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , remove_columns=["""article""", """highlights"""] , )
val_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
snake_case_ = self.get_auto_remove_tmp_dir()
snake_case_ = SeqaSeqTrainingArguments(
output_dir=UpperCAmelCase_ , per_device_train_batch_size=UpperCAmelCase_ , per_device_eval_batch_size=UpperCAmelCase_ , predict_with_generate=UpperCAmelCase_ , evaluation_strategy="""steps""" , do_train=UpperCAmelCase_ , do_eval=UpperCAmelCase_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
snake_case_ = SeqaSeqTrainer(
model=UpperCAmelCase_ , args=UpperCAmelCase_ , compute_metrics=_compute_metrics , train_dataset=UpperCAmelCase_ , eval_dataset=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , )
# start training
trainer.train()
| 347 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def ucal( u , p ) -> float:
    temp = u
    for i in range(1 , p ):
        temp = temp * (u - i)
    return temp
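# Added worked example: ucal computes the falling product u(u-1)...(u-p+1) used
# by Newton's forward-difference formula, e.g.
# ucal(0.5, 3) == 0.5 * (0.5 - 1) * (0.5 - 2) == 0.375.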
def main() -> None:
    n = int(input("""enter the numbers of values: """ ) )
    y = []
    for _ in range(n ):
        y.append([] )
    for i in range(n ):
        for j in range(n ):
            y[i].append(j )
            y[i][j] = 0
    print("""enter the values of parameters in a list: """ )
    x = list(map(int , input().split() ) )
    print("""enter the values of corresponding parameters: """ )
    for i in range(n ):
        y[i][0] = float(input() )
    value = int(input("""enter the value to interpolate: """ ) )
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 , n ):
        for j in range(n - i ):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1 , n ):
        summ += (ucal(u , i ) * y[0][i]) / math.factorial(i )
    print(f"""the value at {value} is {summ}""" )
if __name__ == "__main__":
main()
| 347 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
__SCREAMING_SNAKE_CASE : Tuple = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
__SCREAMING_SNAKE_CASE : Tuple = subprocess.check_output(f"""git diff --name-only {fork_point_sha}""".split()).decode('utf-8').split()
__SCREAMING_SNAKE_CASE : Any = '|'.join(sys.argv[1:])
__SCREAMING_SNAKE_CASE : Optional[Any] = re.compile(Rf"""^({joined_dirs}).*?\.py$""")
__SCREAMING_SNAKE_CASE : List[str] = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
| 347 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__) # pylint: disable=invalid-name
__SCREAMING_SNAKE_CASE : Optional[int] = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def _a ( height , width , scale_factor=8 ) -> tuple[int, int]:
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
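# Added worked example: with the movq scale factor of 8, a 768 x 768 request maps
# to 768 // 64 * 8 == 96 on each side, while a non-multiple such as 700 rounds up
# to (700 // 64 + 1) * 8 == 88.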
def _a ( pil_image , w=512 , h=512 ) -> torch.Tensor:
    pil_image = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
    arr = np.array(pil_image.convert("""RGB""" ) )
    arr = arr.astype(np.float32 ) / 127.5 - 1
    arr = np.transpose(arr , [2, 0, 1] )
    image = torch.from_numpy(arr ).unsqueeze(0 )
    return image
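# Added note: dividing by 127.5 and subtracting 1 maps 8-bit pixel values from
# [0, 255] into the [-1, 1] range the diffusion model expects.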
class __A (snake_case__):
'''simple docstring'''
def __init__( self : int , UpperCAmelCase_ : UNetaDConditionModel , UpperCAmelCase_ : DDPMScheduler , UpperCAmelCase_ : VQModel , ) ->Optional[Any]:
"""simple docstring"""
super().__init__()
self.register_modules(
unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , movq=UpperCAmelCase_ , )
snake_case_ = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int ) ->List[Any]:
"""simple docstring"""
snake_case_ = min(int(num_inference_steps * strength ) , UpperCAmelCase_ )
snake_case_ = max(num_inference_steps - init_timestep , 0 )
snake_case_ = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
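    # Worked example of the strength math above: with num_inference_steps=100 and
    # strength=0.3 (the __call__ default), init_timestep = min(int(100 * 0.3), 100) = 30
    # and t_start = max(100 - 30, 0) = 70, so only the last 30 scheduler timesteps are run.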
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any]=None ) ->Dict:
"""simple docstring"""
if not isinstance(UpperCAmelCase_ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(UpperCAmelCase_ )}""" )
snake_case_ = image.to(device=UpperCAmelCase_ , dtype=UpperCAmelCase_ )
snake_case_ = batch_size * num_images_per_prompt
if image.shape[1] == 4:
snake_case_ = image
else:
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) and len(UpperCAmelCase_ ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(UpperCAmelCase_ )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(UpperCAmelCase_ )
]
snake_case_ = torch.cat(UpperCAmelCase_ , dim=0 )
else:
snake_case_ = self.movq.encode(UpperCAmelCase_ ).latent_dist.sample(UpperCAmelCase_ )
snake_case_ = self.movq.config.scaling_factor * init_latents
snake_case_ = torch.cat([init_latents] , dim=0 )
snake_case_ = init_latents.shape
snake_case_ = randn_tensor(UpperCAmelCase_ , generator=UpperCAmelCase_ , device=UpperCAmelCase_ , dtype=UpperCAmelCase_ )
# get latents
snake_case_ = self.scheduler.add_noise(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = init_latents
return latents
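    # In outline, prepare_latents encodes the image with the movq VAE, scales the
    # result by movq.config.scaling_factor, and then calls scheduler.add_noise at
    # the strength-derived timestep, so denoising starts from a noised version of
    # the input image rather than from pure noise.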
def lowerCAmelCase ( self : Any , UpperCAmelCase_ : int=0 ) ->List[Any]:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
snake_case_ = torch.device(F"""cuda:{gpu_id}""" )
snake_case_ = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : List[str]=0 ) ->Dict:
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
snake_case_ = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=UpperCAmelCase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
snake_case_ = None
for cpu_offloaded_model in [self.unet, self.movq]:
snake_case_ , snake_case_ = cpu_offload_with_hook(UpperCAmelCase_ , UpperCAmelCase_ , prev_module_hook=UpperCAmelCase_ )
# We'll offload the last model manually.
snake_case_ = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase ( self : int ) ->str:
"""simple docstring"""
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCAmelCase_ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(UpperCAmelCase_ )
def __call__( self : Any , UpperCAmelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCAmelCase_ : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , UpperCAmelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 100 , UpperCAmelCase_ : float = 4.0 , UpperCAmelCase_ : float = 0.3 , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = self._execution_device
snake_case_ = guidance_scale > 1.0
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = torch.cat(UpperCAmelCase_ , dim=0 )
snake_case_ = image_embeds.shape[0]
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = torch.cat(UpperCAmelCase_ , dim=0 )
if do_classifier_free_guidance:
snake_case_ = image_embeds.repeat_interleave(UpperCAmelCase_ , dim=0 )
snake_case_ = negative_image_embeds.repeat_interleave(UpperCAmelCase_ , dim=0 )
snake_case_ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCAmelCase_ )
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = [image]
if not all(isinstance(UpperCAmelCase_ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"""Input is in incorrect format: {[type(UpperCAmelCase_ ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
snake_case_ = torch.cat([prepare_image(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) for i in image] , dim=0 )
snake_case_ = image.to(dtype=image_embeds.dtype , device=UpperCAmelCase_ )
snake_case_ = self.movq.encode(UpperCAmelCase_ )["""latents"""]
snake_case_ = latents.repeat_interleave(UpperCAmelCase_ , dim=0 )
self.scheduler.set_timesteps(UpperCAmelCase_ , device=UpperCAmelCase_ )
snake_case_ , snake_case_ = self.get_timesteps(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = timesteps[:1].repeat(batch_size * num_images_per_prompt )
snake_case_ , snake_case_ = downscale_height_and_width(UpperCAmelCase_ , UpperCAmelCase_ , self.movq_scale_factor )
snake_case_ = self.prepare_latents(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , image_embeds.dtype , UpperCAmelCase_ , UpperCAmelCase_ )
for i, t in enumerate(self.progress_bar(UpperCAmelCase_ ) ):
# expand the latents if we are doing classifier free guidance
snake_case_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
snake_case_ = {"""image_embeds""": image_embeds}
snake_case_ = self.unet(
sample=UpperCAmelCase_ , timestep=UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ , added_cond_kwargs=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , )[0]
if do_classifier_free_guidance:
snake_case_ , snake_case_ = noise_pred.split(latents.shape[1] , dim=1 )
snake_case_ , snake_case_ = noise_pred.chunk(2 )
snake_case_ , snake_case_ = variance_pred.chunk(2 )
snake_case_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
snake_case_ = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
snake_case_ , snake_case_ = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
snake_case_ = self.scheduler.step(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_ , )[0]
# post-processing
snake_case_ = self.movq.decode(UpperCAmelCase_ , force_not_quantize=UpperCAmelCase_ )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}""" )
if output_type in ["np", "pil"]:
snake_case_ = image * 0.5 + 0.5
snake_case_ = image.clamp(0 , 1 )
snake_case_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
snake_case_ = self.numpy_to_pil(UpperCAmelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCAmelCase_ )
| 347 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Tuple = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
__SCREAMING_SNAKE_CASE : List[Any] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]:
snake_case_ = {}
with open(_SCREAMING_SNAKE_CASE , """r""" ) as file:
for line_number, line in enumerate(_SCREAMING_SNAKE_CASE ):
snake_case_ = line.strip()
if line:
snake_case_ = line.split()
snake_case_ = line_number
snake_case_ = words[0]
snake_case_ = value
return result
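# Sketch of the expected input format (a hypothetical two-line label file):
#   "down 2\nup 1\n"  ->  {0: "down", 1: "up"}
# i.e. each non-empty line contributes its enumerate() index mapped to its first
# whitespace-separated token; the result is used as id2label for --is_seq_class below.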
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
for attribute in key.split(""".""" ):
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_SCREAMING_SNAKE_CASE ):
snake_case_ = PARAM_MAPPING[full_name.split(""".""" )[-1]]
snake_case_ = """param"""
if weight_type is not None and weight_type != "param":
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape
elif weight_type is not None and weight_type == "param":
snake_case_ = hf_pointer
for attribute in hf_param_name.split(""".""" ):
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = shape_pointer.shape
# let's reduce dimension
snake_case_ = value[0]
else:
snake_case_ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
snake_case_ = value
elif weight_type == "weight_g":
snake_case_ = value
elif weight_type == "weight_v":
snake_case_ = value
elif weight_type == "bias":
snake_case_ = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = value
else:
snake_case_ = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
snake_case_ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_SCREAMING_SNAKE_CASE ):
snake_case_ = PARAM_MAPPING[full_name.split(""".""" )[-1]]
snake_case_ = """param"""
if weight_type is not None and weight_type != "param":
snake_case_ = """.""".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
snake_case_ = """.""".join([key, hf_param_name] )
else:
snake_case_ = key
snake_case_ = value if """lm_head""" in full_key else value[0]
__SCREAMING_SNAKE_CASE : int = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> List[str]:
snake_case_ = False
for key, mapped_key in MAPPING.items():
snake_case_ = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
snake_case_ = True
if "*" in mapped_key:
snake_case_ = name.split(_SCREAMING_SNAKE_CASE )[0].split(""".""" )[-2]
snake_case_ = mapped_key.replace("""*""" , _SCREAMING_SNAKE_CASE )
if "weight_g" in name:
snake_case_ = """weight_g"""
elif "weight_v" in name:
snake_case_ = """weight_v"""
elif "bias" in name:
snake_case_ = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case_ = """weight"""
else:
snake_case_ = None
if hf_dict is not None:
rename_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return is_used
return is_used
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
snake_case_ = []
snake_case_ = fairseq_model.state_dict()
snake_case_ = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
snake_case_ = False
if "conv_layers" in name:
load_conv_layer(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == """group""" , )
snake_case_ = True
else:
snake_case_ = load_wavaveca_layer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if not is_used:
unused_weights.append(_SCREAMING_SNAKE_CASE )
logger.warning(f"""Unused weights: {unused_weights}""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
snake_case_ = full_name.split("""conv_layers.""" )[-1]
snake_case_ = name.split(""".""" )
snake_case_ = int(items[0] )
snake_case_ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
snake_case_ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
snake_case_ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
snake_case_ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
snake_case_ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_SCREAMING_SNAKE_CASE )
@torch.no_grad()
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False ) -> int:
if config_path is not None:
snake_case_ = WavaVecaConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
else:
snake_case_ = WavaVecaConfig()
if is_seq_class:
snake_case_ = read_txt_into_dict(_SCREAMING_SNAKE_CASE )
snake_case_ = idalabel
snake_case_ = WavaVecaForSequenceClassification(_SCREAMING_SNAKE_CASE )
snake_case_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , )
feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE )
elif is_finetuned:
if dict_path:
snake_case_ = Dictionary.load(_SCREAMING_SNAKE_CASE )
            # important: change the bos & pad token ids, since the CTC blank symbol is <pad>
            # and not <s> as in fairseq
snake_case_ = target_dict.pad_index
snake_case_ = target_dict.bos_index
snake_case_ = target_dict.eos_index
snake_case_ = len(target_dict.symbols )
snake_case_ = os.path.join(_SCREAMING_SNAKE_CASE , """vocab.json""" )
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_SCREAMING_SNAKE_CASE ) )
return
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
snake_case_ = target_dict.indices
# fairseq has the <pad> and <s> switched
snake_case_ = 0
snake_case_ = 1
with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = WavaVecaCTCTokenizer(
_SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_SCREAMING_SNAKE_CASE , )
snake_case_ = True if config.feat_extract_norm == """layer""" else False
snake_case_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , )
snake_case_ = WavaVecaProcessor(feature_extractor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
processor.save_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ = WavaVecaForCTC(_SCREAMING_SNAKE_CASE )
else:
snake_case_ = WavaVecaForPreTraining(_SCREAMING_SNAKE_CASE )
if is_finetuned or is_seq_class:
snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
snake_case_ = argparse.Namespace(task="""audio_pretraining""" )
snake_case_ = fairseq.tasks.setup_task(_SCREAMING_SNAKE_CASE )
snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_SCREAMING_SNAKE_CASE )
snake_case_ = model[0].eval()
recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , not is_finetuned )
hf_wavavec.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
__SCREAMING_SNAKE_CASE : Any = parser.parse_args()
__SCREAMING_SNAKE_CASE : List[Any] = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
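    # Example invocation (script and checkpoint paths hypothetical):
    #   python convert_wav2vec2_checkpoint.py \
    #       --checkpoint_path /path/to/wav2vec_small.pt \
    #       --pytorch_dump_folder_path ./wav2vec2-base \
    #       --not_finetuned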
| 347 | 1 |
"""simple docstring"""
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __A :
'''simple docstring'''
@staticmethod
def lowerCAmelCase ( *UpperCAmelCase_ : int , **UpperCAmelCase_ : Tuple ) ->Union[str, Any]:
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
class __A (unittest.TestCase):
'''simple docstring'''
@require_torch
def lowerCAmelCase ( self : Any ) ->Dict:
"""simple docstring"""
snake_case_ = pipeline(
model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , )
snake_case_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
snake_case_ = image_classifier(UpperCAmelCase_ , candidate_labels=["""a""", """b""", """c"""] )
        # The float scores are so close that we hit floating-point error, and the order is
        # not guaranteed across Python and torch versions.
self.assertIn(
nested_simplify(UpperCAmelCase_ ) , [
[{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}],
[{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """c"""}, {"""score""": 0.333, """label""": """b"""}],
] , )
snake_case_ = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase_ ) , [
[
{"""score""": 0.333, """label""": ANY(UpperCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(UpperCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(UpperCAmelCase_ )},
],
[
{"""score""": 0.333, """label""": ANY(UpperCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(UpperCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(UpperCAmelCase_ )},
],
[
{"""score""": 0.333, """label""": ANY(UpperCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(UpperCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(UpperCAmelCase_ )},
],
[
{"""score""": 0.333, """label""": ANY(UpperCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(UpperCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(UpperCAmelCase_ )},
],
[
{"""score""": 0.333, """label""": ANY(UpperCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(UpperCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(UpperCAmelCase_ )},
],
] , )
@require_tf
def lowerCAmelCase ( self : Dict ) ->Any:
"""simple docstring"""
snake_case_ = pipeline(
model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , framework="""tf""" )
snake_case_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
snake_case_ = image_classifier(UpperCAmelCase_ , candidate_labels=["""a""", """b""", """c"""] )
self.assertEqual(
nested_simplify(UpperCAmelCase_ ) , [{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}] , )
snake_case_ = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase_ ) , [
[
{"""score""": 0.333, """label""": ANY(UpperCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(UpperCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(UpperCAmelCase_ )},
],
[
{"""score""": 0.333, """label""": ANY(UpperCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(UpperCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(UpperCAmelCase_ )},
],
[
{"""score""": 0.333, """label""": ANY(UpperCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(UpperCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(UpperCAmelCase_ )},
],
[
{"""score""": 0.333, """label""": ANY(UpperCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(UpperCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(UpperCAmelCase_ )},
],
[
{"""score""": 0.333, """label""": ANY(UpperCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(UpperCAmelCase_ )},
{"""score""": 0.333, """label""": ANY(UpperCAmelCase_ )},
],
] , )
@slow
@require_torch
def lowerCAmelCase ( self : Dict ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = pipeline(
task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , )
# This is an image of 2 cats with remotes and no planes
snake_case_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
snake_case_ = image_classifier(UpperCAmelCase_ , candidate_labels=["""cat""", """plane""", """remote"""] )
self.assertEqual(
nested_simplify(UpperCAmelCase_ ) , [
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
] , )
snake_case_ = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase_ ) , [
[
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
],
]
* 5 , )
@slow
@require_tf
def lowerCAmelCase ( self : Union[str, Any] ) ->int:
"""simple docstring"""
snake_case_ = pipeline(
task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , framework="""tf""" )
# This is an image of 2 cats with remotes and no planes
snake_case_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
snake_case_ = image_classifier(UpperCAmelCase_ , candidate_labels=["""cat""", """plane""", """remote"""] )
self.assertEqual(
nested_simplify(UpperCAmelCase_ ) , [
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
] , )
snake_case_ = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase_ ) , [
[
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
],
]
* 5 , )
| 347 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class __A :
'''simple docstring'''
def __init__( self : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any=14 , UpperCAmelCase_ : Union[str, Any]=7 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : str=99 , UpperCAmelCase_ : Union[str, Any]=32 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : str=37 , UpperCAmelCase_ : Any="gelu" , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : int=512 , UpperCAmelCase_ : Tuple=0.02 , ) ->List[str]:
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = rotary_dim
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = initializer_range
snake_case_ = None
snake_case_ = vocab_size - 1
snake_case_ = vocab_size - 1
snake_case_ = vocab_size - 1
def lowerCAmelCase ( self : int ) ->Optional[int]:
"""simple docstring"""
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_input_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=UpperCAmelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def lowerCAmelCase ( self : Dict ) ->Tuple:
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def lowerCAmelCase ( self : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict ) ->Tuple:
"""simple docstring"""
snake_case_ = 20
snake_case_ = model_class_name(UpperCAmelCase_ )
snake_case_ = model.init_cache(input_ids.shape[0] , UpperCAmelCase_ )
snake_case_ = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
snake_case_ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
snake_case_ = model(
input_ids[:, :-1] , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , )
snake_case_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
snake_case_ = model(
input_ids[:, -1:] , attention_mask=UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=UpperCAmelCase_ , )
snake_case_ = model(UpperCAmelCase_ )
snake_case_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) ->Dict:
"""simple docstring"""
snake_case_ = 20
snake_case_ = model_class_name(UpperCAmelCase_ )
snake_case_ = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
snake_case_ = model.init_cache(input_ids.shape[0] , UpperCAmelCase_ )
snake_case_ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
snake_case_ = model(
input_ids[:, :-1] , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , )
snake_case_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
snake_case_ = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , )
snake_case_ = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )
snake_case_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
@require_flax
class __A (snake_case__ , snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Any = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
__lowercase: List[str] = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def lowerCAmelCase ( self : Tuple ) ->List[str]:
"""simple docstring"""
snake_case_ = FlaxGPTJModelTester(self )
def lowerCAmelCase ( self : int ) ->List[Any]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : List[str] ) ->Any:
"""simple docstring"""
for model_class_name in self.all_model_classes:
snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
@tooslow
def lowerCAmelCase ( self : List[str] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" )
snake_case_ = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ )
snake_case_ = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" )
snake_case_ = False
snake_case_ = model.config.eos_token_id
snake_case_ = jax.jit(model.generate )
snake_case_ = jit_generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences
snake_case_ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
snake_case_ = [
"""Hello this is a long string of text.\n\nI'm trying to get the text of the""",
"""Hey, I'm a little late to the party. I'm going to""",
]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
@is_pt_flax_cross_test
def lowerCAmelCase ( self : int ) ->str:
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
snake_case_ = model_class.__name__[4:] # Skip the "Flax" at the beginning
snake_case_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ , snake_case_ = pt_inputs["""input_ids"""].shape
snake_case_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCAmelCase_ ):
snake_case_ = 0
snake_case_ = 1
snake_case_ = 0
snake_case_ = 1
snake_case_ = pt_model_class(UpperCAmelCase_ ).eval()
snake_case_ = model_class(UpperCAmelCase_ , dtype=jnp.floataa )
snake_case_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCAmelCase_ )
snake_case_ = fx_state
with torch.no_grad():
snake_case_ = pt_model(**UpperCAmelCase_ ).to_tuple()
snake_case_ = fx_model(**UpperCAmelCase_ ).to_tuple()
self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(UpperCAmelCase_ )
snake_case_ = model_class.from_pretrained(UpperCAmelCase_ , from_pt=UpperCAmelCase_ )
snake_case_ = fx_model_loaded(**UpperCAmelCase_ ).to_tuple()
self.assertEqual(
len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def lowerCAmelCase ( self : List[Any] ) ->str:
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
snake_case_ = model_class.__name__[4:] # Skip the "Flax" at the beginning
snake_case_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = pt_model_class(UpperCAmelCase_ ).eval()
snake_case_ = model_class(UpperCAmelCase_ , dtype=jnp.floataa )
snake_case_ = load_flax_weights_in_pytorch_model(UpperCAmelCase_ , fx_model.params )
snake_case_ , snake_case_ = pt_inputs["""input_ids"""].shape
snake_case_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCAmelCase_ ):
snake_case_ = 0
snake_case_ = 1
snake_case_ = 0
snake_case_ = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
snake_case_ = pt_model(**UpperCAmelCase_ ).to_tuple()
snake_case_ = fx_model(**UpperCAmelCase_ ).to_tuple()
self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(UpperCAmelCase_ )
snake_case_ = pt_model_class.from_pretrained(UpperCAmelCase_ , from_flax=UpperCAmelCase_ )
with torch.no_grad():
snake_case_ = pt_model_loaded(**UpperCAmelCase_ ).to_tuple()
self.assertEqual(
len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def lowerCAmelCase ( self : List[Any] ) ->str:
"""simple docstring"""
for model_class_name in self.all_model_classes:
snake_case_ = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" )
snake_case_ = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase_ )
| 347 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class __A (unittest.TestCase):
'''simple docstring'''
__lowercase: Tuple = StableDiffusionLDMaDPipeline
__lowercase: Any = TEXT_TO_IMAGE_PARAMS
__lowercase: Tuple = TEXT_TO_IMAGE_BATCH_PARAMS
__lowercase: Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
def lowerCAmelCase ( self : List[Any] ) ->List[str]:
"""simple docstring"""
torch.manual_seed(0 )
snake_case_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
snake_case_ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , )
torch.manual_seed(0 )
snake_case_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
snake_case_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
snake_case_ = CLIPTextModel(UpperCAmelCase_ )
snake_case_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
snake_case_ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=0 ) ->int:
"""simple docstring"""
if str(UpperCAmelCase_ ).startswith("""mps""" ):
snake_case_ = torch.manual_seed(UpperCAmelCase_ )
else:
snake_case_ = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
snake_case_ = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase ( self : str ) ->Tuple:
"""simple docstring"""
snake_case_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
snake_case_ = StableDiffusionLDMaDPipeline(**UpperCAmelCase_ )
snake_case_ = ldmad_pipe.to(UpperCAmelCase_ )
ldmad_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
snake_case_ = self.get_dummy_inputs(UpperCAmelCase_ )
snake_case_ = ldmad_pipe(**UpperCAmelCase_ )
snake_case_ , snake_case_ = output.rgb, output.depth
snake_case_ = rgb[0, -3:, -3:, -1]
snake_case_ = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
snake_case_ = np.array(
[0.37_338_176, 0.70_247, 0.74_203_193, 0.51_643_604, 0.58_256_793, 0.60_932_136, 0.4_181_095, 0.48_355_877, 0.46_535_262] )
snake_case_ = np.array([103.46_727, 85.812_004, 87.849_236] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1E-2
def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = self.get_dummy_components()
snake_case_ = StableDiffusionLDMaDPipeline(**UpperCAmelCase_ )
snake_case_ = ldmad_pipe.to(UpperCAmelCase_ )
ldmad_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
snake_case_ = self.get_dummy_inputs(UpperCAmelCase_ )
snake_case_ = 3 * [inputs["""prompt"""]]
# forward
snake_case_ = ldmad_pipe(**UpperCAmelCase_ )
snake_case_ , snake_case_ = output.rgb, output.depth
snake_case_ = rgb_slice_a[0, -3:, -3:, -1]
snake_case_ = depth_slice_a[0, -3:, -1]
snake_case_ = self.get_dummy_inputs(UpperCAmelCase_ )
snake_case_ = 3 * [inputs.pop("""prompt""" )]
snake_case_ = ldmad_pipe.tokenizer(
UpperCAmelCase_ , padding="""max_length""" , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=UpperCAmelCase_ , return_tensors="""pt""" , )
snake_case_ = text_inputs["""input_ids"""].to(UpperCAmelCase_ )
snake_case_ = ldmad_pipe.text_encoder(UpperCAmelCase_ )[0]
snake_case_ = prompt_embeds
# forward
snake_case_ = ldmad_pipe(**UpperCAmelCase_ )
snake_case_ , snake_case_ = output.rgb, output.depth
snake_case_ = rgb_slice_a[0, -3:, -3:, -1]
snake_case_ = depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1E-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1E-4
def lowerCAmelCase ( self : Dict ) ->str:
"""simple docstring"""
snake_case_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
snake_case_ = PNDMScheduler(skip_prk_steps=UpperCAmelCase_ )
snake_case_ = StableDiffusionLDMaDPipeline(**UpperCAmelCase_ )
snake_case_ = ldmad_pipe.to(UpperCAmelCase_ )
ldmad_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
snake_case_ = self.get_dummy_inputs(UpperCAmelCase_ )
snake_case_ = """french fries"""
snake_case_ = ldmad_pipe(**UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ )
snake_case_ , snake_case_ = output.rgb, output.depth
snake_case_ = rgb[0, -3:, -3:, -1]
snake_case_ = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
snake_case_ = np.array(
[0.37_044, 0.71_811_503, 0.7_223_251, 0.48_603_675, 0.5_638_391, 0.6_364_948, 0.42_833_704, 0.4_901_315, 0.47_926_217] )
snake_case_ = np.array([107.84_738, 84.62_802, 89.962_135] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1E-2
@slow
@require_torch_gpu
class __A (unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[int] ) ->Any:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict="cpu" , UpperCAmelCase_ : Optional[Any]=torch.floataa , UpperCAmelCase_ : Optional[Any]=0 ) ->Dict:
"""simple docstring"""
snake_case_ = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
snake_case_ = np.random.RandomState(UpperCAmelCase_ ).standard_normal((1, 4, 64, 64) )
snake_case_ = torch.from_numpy(UpperCAmelCase_ ).to(device=UpperCAmelCase_ , dtype=UpperCAmelCase_ )
snake_case_ = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase ( self : Optional[Any] ) ->Dict:
"""simple docstring"""
snake_case_ = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d""" )
snake_case_ = ldmad_pipe.to(UpperCAmelCase_ )
ldmad_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
snake_case_ = self.get_inputs(UpperCAmelCase_ )
snake_case_ = ldmad_pipe(**UpperCAmelCase_ )
snake_case_ , snake_case_ = output.rgb, output.depth
snake_case_ = rgb[0, -3:, -3:, -1].flatten()
snake_case_ = rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512)
snake_case_ = np.array(
[0.53_805_465, 0.56_707_305, 0.5_486_515, 0.57_012_236, 0.5_814_511, 0.56_253_487, 0.54_843_014, 0.55_092_263, 0.6_459_706] )
snake_case_ = np.array(
[0.9_263_781, 0.6_678_672, 0.5_486_515, 0.92_202_145, 0.67_831_135, 0.56_253_487, 0.9_241_694, 0.7_551_478, 0.6_459_706] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3E-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3E-3
@nightly
@require_torch_gpu
class __A (unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase ( self : int ) ->Dict:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str]="cpu" , UpperCAmelCase_ : Tuple=torch.floataa , UpperCAmelCase_ : List[str]=0 ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
snake_case_ = np.random.RandomState(UpperCAmelCase_ ).standard_normal((1, 4, 64, 64) )
snake_case_ = torch.from_numpy(UpperCAmelCase_ ).to(device=UpperCAmelCase_ , dtype=UpperCAmelCase_ )
snake_case_ = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 50,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
snake_case_ = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d""" ).to(UpperCAmelCase_ )
ldmad_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
snake_case_ = self.get_inputs(UpperCAmelCase_ )
snake_case_ = ldmad_pipe(**UpperCAmelCase_ )
snake_case_ , snake_case_ = output.rgb, output.depth
snake_case_ = 0.495_586
snake_case_ = 0.33_795_515
snake_case_ = 112.48_518
snake_case_ = 98.489_746
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
def lowerCAmelCase ( self : Optional[Any] ) ->Any:
"""simple docstring"""
snake_case_ = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d-4c""" ).to(UpperCAmelCase_ )
ldmad_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
snake_case_ = self.get_inputs(UpperCAmelCase_ )
snake_case_ = ldmad_pipe(**UpperCAmelCase_ )
snake_case_ , snake_case_ = output.rgb, output.depth
snake_case_ = 0.4_194_127
snake_case_ = 0.35_375_586
snake_case_ = 0.5_638_502
snake_case_ = 0.34_686_103
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
| 347 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
__SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
class __A (snake_case__):
'''simple docstring'''
__lowercase: int = """upernet"""
def __init__( self : str , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : str=512 , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : Optional[Any]=[1, 2, 3, 6] , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Tuple=0.4 , UpperCAmelCase_ : Tuple=384 , UpperCAmelCase_ : Union[str, Any]=256 , UpperCAmelCase_ : str=1 , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : Tuple=255 , **UpperCAmelCase_ : Dict , ) ->Union[str, Any]:
"""simple docstring"""
super().__init__(**UpperCAmelCase_ )
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
snake_case_ = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = backbone_config.get("""model_type""" )
snake_case_ = CONFIG_MAPPING[backbone_model_type]
snake_case_ = config_class.from_dict(UpperCAmelCase_ )
snake_case_ = backbone_config
snake_case_ = hidden_size
snake_case_ = initializer_range
snake_case_ = pool_scales
snake_case_ = use_auxiliary_head
snake_case_ = auxiliary_loss_weight
snake_case_ = auxiliary_in_channels
snake_case_ = auxiliary_channels
snake_case_ = auxiliary_num_convs
snake_case_ = auxiliary_concat_input
snake_case_ = loss_ignore_index
def lowerCAmelCase ( self : str ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = copy.deepcopy(self.__dict__ )
snake_case_ = self.backbone_config.to_dict()
snake_case_ = self.__class__.model_type
return output
| 347 | 1 |
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE ) -> float:
if not nums: # Makes sure that the list is not empty
raise ValueError("""List is empty""" )
snake_case_ = sum(_SCREAMING_SNAKE_CASE ) / len(_SCREAMING_SNAKE_CASE ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(_SCREAMING_SNAKE_CASE )
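# Worked example: for nums = [1, 2, 3] the average is 2, so the mean absolute
# deviation is (|1 - 2| + |2 - 2| + |3 - 2|) / 3 = 2 / 3 ≈ 0.6667.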
if __name__ == "__main__":
import doctest
doctest.testmod()
| 347 |
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __A (unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[int] ) ->Dict:
"""simple docstring"""
snake_case_ = """ylacombe/bark-small"""
snake_case_ = tempfile.mkdtemp()
snake_case_ = """en_speaker_1"""
snake_case_ = """This is a test string"""
snake_case_ = """speaker_embeddings_path.json"""
snake_case_ = """speaker_embeddings"""
def lowerCAmelCase ( self : List[str] , **UpperCAmelCase_ : str ) ->Optional[int]:
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase_ )
def lowerCAmelCase ( self : Any ) ->Dict:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase ( self : List[Any] ) ->Dict:
"""simple docstring"""
snake_case_ = self.get_tokenizer()
snake_case_ = BarkProcessor(tokenizer=UpperCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
snake_case_ = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def lowerCAmelCase ( self : Dict ) ->int:
"""simple docstring"""
snake_case_ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
snake_case_ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
snake_case_ = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def lowerCAmelCase ( self : Optional[Any] ) ->Any:
"""simple docstring"""
snake_case_ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
snake_case_ = 35
snake_case_ = 2
snake_case_ = 8
snake_case_ = {
"""semantic_prompt""": np.ones(UpperCAmelCase_ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
snake_case_ = processor(text=self.input_string , voice_preset=UpperCAmelCase_ )
snake_case_ = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
snake_case_ = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(UpperCAmelCase_ , **UpperCAmelCase_ )
snake_case_ = processor(text=self.input_string , voice_preset=UpperCAmelCase_ )
snake_case_ = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
snake_case_ = processor(text=self.input_string , voice_preset=self.voice_preset )
def lowerCAmelCase ( self : Tuple ) ->Dict:
"""simple docstring"""
snake_case_ = self.get_tokenizer()
snake_case_ = BarkProcessor(tokenizer=UpperCAmelCase_ )
snake_case_ = processor(text=self.input_string )
snake_case_ = tokenizer(
self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 347 | 1 |
"""simple docstring"""
__SCREAMING_SNAKE_CASE : dict[str, float] = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.60_9344,
"knot": 1.852,
}
__SCREAMING_SNAKE_CASE : dict[str, float] = {
"km/h": 1.0,
"m/s": 0.2_7777_7778,
"mph": 0.6_2137_1192,
"knot": 0.5_3995_6803,
}
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> float:
if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
snake_case_ = (
f"""Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"""
f"""Valid values are: {", ".join(_SCREAMING_SNAKE_CASE )}"""
)
raise ValueError(_SCREAMING_SNAKE_CASE )
return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 )
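# Worked example: converting 100 km/h to m/s multiplies by
# speed_chart["km/h"] * speed_chart_inverse["m/s"] = 1.0 * 0.27777778,
# giving round(27.777778, 3) == 27.778.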
if __name__ == "__main__":
import doctest
doctest.testmod()
| 347 |
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
__SCREAMING_SNAKE_CASE : int = sys.version_info >= (3, 10)
def _a ( _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple:
return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE )
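# list_field exists because dataclasses forbid mutable default values; e.g.
# list_field(default=[1, 2, 3]) is shorthand for
# field(default_factory=lambda: [1, 2, 3]), giving each instance a fresh list.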
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """
        Small helper to check pseudo-equality of parsed arguments on `ArgumentParser` instances.
        """
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)

    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)
    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))
    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))

    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))
    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)

    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)
    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)

    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            parsed_args = parser.parse_json_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]

        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
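
# Hedged usage sketch of the pattern exercised above: `HfArgumentParser` maps each
# dataclass field to a CLI flag. `BasicExample` is the dataclass defined in this file.
if __name__ == "__main__":
    demo_parser = HfArgumentParser(BasicExample)
    (demo_example,) = demo_parser.parse_args_into_dataclasses(
        ["--foo", "1", "--bar", "0.5", "--baz", "quux"], look_for_args_file=False
    )
    print(demo_example)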
| 347 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 347 |
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model(
    model,
    bnb_quantization_config,
    weights_location=None,
    device_map=None,
    no_split_module_classes=None,
    max_memory=None,
    offload_folder=None,
    offload_state_dict=False,
):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    param_name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, param_name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization. "
            "We move the model to cuda."
        )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized. Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if we don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace the `nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
def has_4bit_bnb_layers(model):
    """Check if we have `bnb.nn.Linear4bit` layers inside our model."""
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter):
    return next(parameter.parameters()).device
def quantize_and_offload(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
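
# Hedged usage sketch (needs a CUDA GPU plus `bitsandbytes`; the model name is an
# assumption for illustration). It mirrors the intended entry point of this module:
#
#     from accelerate.utils import BnbQuantizationConfig
#     from transformers import AutoModelForCausalLM
#
#     bnb_config = BnbQuantizationConfig(load_in_8bit=True)
#     model = AutoModelForCausalLM.from_pretrained("gpt2")
#     model = load_and_quantize_model(model, bnb_config)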
| 347 | 1 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1_002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_002)
    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    def test_picklable(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
            pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35_378, 6_661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
        original_tokenizer_encodings = [
0,
3_293,
83,
10,
4_552,
4_989,
7_986,
678,
10,
5_915,
111,
179_459,
124_850,
4,
6_044,
237,
12,
6,
5,
6,
4,
6_780,
705,
15,
1_388,
44,
378,
10_114,
711,
152,
20,
6,
5,
22_376,
642,
1_221,
15_190,
34_153,
450,
5_608,
959,
1_119,
57_702,
136,
186,
47,
1_098,
29_367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_044,
237,
6_284,
50_901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
snake_case_ = {"""input_ids""": [[0, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [0, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        # `snake_case_` is the expected-encoding dict defined just above
        self.tokenizer_integration_test_util(
            expected_encoding=snake_case_,
            model_name="xlm-roberta-base",
            revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3",
        )
| 347 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/beit-base-patch16-224-pt22k': (
'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = "beit"
    def __init__(
        self,
        vocab_size=8_192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
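
# Hedged usage sketch: instantiating the config defined above with a couple of
# overridden defaults and reading back the ONNX export tolerance. No pretrained
# weights are involved, so this runs offline.
if __name__ == "__main__":
    demo_config = BeitConfig(image_size=384, num_hidden_layers=6)
    demo_onnx_config = BeitOnnxConfig(demo_config)
    print(demo_config.image_size, demo_onnx_config.atol_for_validation)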
| 347 | 1 |
"""simple docstring"""
import math
def decimal_to_octal(num: int) -> str:
    """Convert a decimal number to an octal number."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"
def main() -> None:
print("""\n2 in octal is:""" )
print(decimal_to_octal(2 ) ) # = 2
print("""\n8 in octal is:""" )
print(decimal_to_octal(8 ) ) # = 10
print("""\n65 in octal is:""" )
print(decimal_to_octal(65 ) ) # = 101
print("""\n216 in octal is:""" )
print(decimal_to_octal(216 ) ) # = 330
print("""\n512 in octal is:""" )
print(decimal_to_octal(512 ) ) # = 1000
print("""\n""" )
if __name__ == "__main__":
main()
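
# A quick cross-check of the implementation above against Python's builtin `oct`,
# which produces the same "0o"-prefixed representation for non-negative integers.
def _check_against_builtin() -> None:
    for value in (2, 8, 65, 216, 512):
        assert decimal_to_octal(value) == oct(value)


if __name__ == "__main__":
    _check_against_builtin()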
| 347 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
}
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
SPIECE_UNDERLINE = '▁'


class TaTokenizer(PreTrainedTokenizer):
    """
    Construct a T5 tokenizer, based on SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        legacy=True,
        **kwargs,
    ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )

        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self._extra_ids = extra_ids

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]
    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        """Do not add eos again if user already added it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens
    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
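
# Hedged usage sketch (downloads the `t5-small` vocabulary on first use): the
# sentinel helpers above expose the `<extra_id_*>` tokens used for span corruption.
if __name__ == "__main__":
    demo_tokenizer = TaTokenizer.from_pretrained("t5-small")
    print(demo_tokenizer.get_sentinel_tokens()[:3])
    print(demo_tokenizer.encode("Studies have shown that owning a dog is good for you"))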
| 347 | 1 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ):
        """Compute the expected (height, width) after shortest-edge resizing."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["""shortest_edge"""] * h / w )
                expected_width = self.size["""shortest_edge"""]
            elif w > h:
                expected_height = self.size["""shortest_edge"""]
                expected_width = int(self.size["""shortest_edge"""] * w / h )
            else:
                expected_height = self.size["""shortest_edge"""]
                expected_width = self.size["""shortest_edge"""]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
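# Worked example of the resize rule above (added comment): with
# shortest_edge = 18, a 30 x 400 (h x w) image keeps its aspect ratio and
# resolves to expected_height = 18, expected_width = int(18 * 400 / 30) = 240.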
@require_torch
@require_vision
class __A (snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Optional[int] = ConditionalDetrImageProcessor if is_vision_available() else None
def lowerCAmelCase ( self : List[str] ) ->List[Any]:
"""simple docstring"""
snake_case_ = ConditionalDetrImageProcessingTester(self )
@property
def lowerCAmelCase ( self : Any ) ->Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self : Tuple ) ->Tuple:
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase_ , """image_mean""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """image_std""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """do_normalize""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """do_resize""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """size""" ) )
def lowerCAmelCase ( self : Tuple ) ->str:
"""simple docstring"""
snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1_333} )
self.assertEqual(image_processor.do_pad , UpperCAmelCase_ )
snake_case_ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCAmelCase_ )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , UpperCAmelCase_ )
def lowerCAmelCase ( self : str ) ->Union[str, Any]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Dict ) ->int:
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(UpperCAmelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_ )
snake_case_ = image_processing(UpperCAmelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , np.ndarray )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(UpperCAmelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ = image_processing(UpperCAmelCase_ , return_tensors="""pt""" ).pixel_values
snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase ( self : int ) ->List[str]:
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(UpperCAmelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ = image_processing(UpperCAmelCase_ , return_tensors="""pt""" ).pixel_values
snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(UpperCAmelCase_ , batched=UpperCAmelCase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCAmelCase ( self : List[str] ) ->Optional[int]:
"""simple docstring"""
snake_case_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
snake_case_ = json.loads(f.read() )
snake_case_ = {"""image_id""": 39_769, """annotations""": target}
# encode them
snake_case_ = ConditionalDetrImageProcessor.from_pretrained("""microsoft/conditional-detr-resnet-50""" )
snake_case_ = image_processing(images=UpperCAmelCase_ , annotations=UpperCAmelCase_ , return_tensors="""pt""" )
# verify pixel values
snake_case_ = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCAmelCase_ )
snake_case_ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCAmelCase_ , atol=1E-4 ) )
# verify area
snake_case_ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCAmelCase_ ) )
# verify boxes
snake_case_ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCAmelCase_ )
snake_case_ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCAmelCase_ , atol=1E-3 ) )
# verify image_id
snake_case_ = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCAmelCase_ ) )
# verify is_crowd
snake_case_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCAmelCase_ ) )
# verify class_labels
snake_case_ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCAmelCase_ ) )
# verify orig_size
snake_case_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCAmelCase_ ) )
# verify size
snake_case_ = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCAmelCase_ ) )
@slow
def lowerCAmelCase ( self : Tuple ) ->Any:
"""simple docstring"""
snake_case_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
snake_case_ = json.loads(f.read() )
snake_case_ = {"""file_name""": """000000039769.png""", """image_id""": 39_769, """segments_info""": target}
snake_case_ = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
snake_case_ = ConditionalDetrImageProcessor(format="""coco_panoptic""" )
snake_case_ = image_processing(images=UpperCAmelCase_ , annotations=UpperCAmelCase_ , masks_path=UpperCAmelCase_ , return_tensors="""pt""" )
# verify pixel values
snake_case_ = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCAmelCase_ )
snake_case_ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCAmelCase_ , atol=1E-4 ) )
# verify area
snake_case_ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCAmelCase_ ) )
# verify boxes
snake_case_ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCAmelCase_ )
snake_case_ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCAmelCase_ , atol=1E-3 ) )
# verify image_id
snake_case_ = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCAmelCase_ ) )
# verify is_crowd
snake_case_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCAmelCase_ ) )
# verify class_labels
snake_case_ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCAmelCase_ ) )
# verify masks
snake_case_ = 822_873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , UpperCAmelCase_ )
# verify orig_size
snake_case_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCAmelCase_ ) )
# verify size
snake_case_ = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCAmelCase_ ) )
| 347 |
"""simple docstring"""
def solution( limit = 1_000_000 ) -> int:
    phi = [i - 1 for i in range(limit + 1 )]
    for i in range(2 , limit + 1 ):
        if phi[i] == i - 1:
            for j in range(2 * i , limit + 1 , i ):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1] )
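# Quick check of the totient sieve above (added; not from the original file):
# for limit = 8, sum(phi[2:9]) = 1 + 2 + 2 + 4 + 2 + 6 + 4 = 21, matching the
# count of reduced proper fractions with denominator <= 8 quoted in
# Project Euler problem 72.
assert solution(8 ) == 21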
if __name__ == "__main__":
print(solution())
| 347 | 1 |
"""simple docstring"""
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Any = {
'huggingface/autoformer-tourism-monthly': 'https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json',
}
class __A (snake_case__):
'''simple docstring'''
__lowercase: Optional[Any] = """autoformer"""
__lowercase: Any = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : str = "student_t" , UpperCAmelCase_ : str = "nll" , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : List[int] = [1, 2, 3, 4, 5, 6, 7] , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : int = 64 , UpperCAmelCase_ : int = 2 , UpperCAmelCase_ : int = 2 , UpperCAmelCase_ : int = 2 , UpperCAmelCase_ : int = 2 , UpperCAmelCase_ : int = 32 , UpperCAmelCase_ : int = 32 , UpperCAmelCase_ : str = "gelu" , UpperCAmelCase_ : float = 0.1 , UpperCAmelCase_ : float = 0.1 , UpperCAmelCase_ : float = 0.1 , UpperCAmelCase_ : float = 0.1 , UpperCAmelCase_ : float = 0.1 , UpperCAmelCase_ : int = 100 , UpperCAmelCase_ : float = 0.02 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : int = 10 , UpperCAmelCase_ : int = 25 , UpperCAmelCase_ : int = 3 , **UpperCAmelCase_ : int , ) ->Tuple:
"""simple docstring"""
snake_case_ = prediction_length
snake_case_ = context_length if context_length is not None else prediction_length
snake_case_ = distribution_output
snake_case_ = loss
snake_case_ = input_size
snake_case_ = num_time_features
snake_case_ = lags_sequence
snake_case_ = scaling
snake_case_ = num_dynamic_real_features
snake_case_ = num_static_real_features
snake_case_ = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(UpperCAmelCase_ ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
snake_case_ = cardinality
else:
snake_case_ = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(UpperCAmelCase_ ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
snake_case_ = embedding_dimension
else:
snake_case_ = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
snake_case_ = num_parallel_samples
# Transformer architecture configuration
snake_case_ = input_size * len(self.lags_sequence ) + self._number_of_features
snake_case_ = d_model
snake_case_ = encoder_attention_heads
snake_case_ = decoder_attention_heads
snake_case_ = encoder_ffn_dim
snake_case_ = decoder_ffn_dim
snake_case_ = encoder_layers
snake_case_ = decoder_layers
snake_case_ = dropout
snake_case_ = attention_dropout
snake_case_ = activation_dropout
snake_case_ = encoder_layerdrop
snake_case_ = decoder_layerdrop
snake_case_ = activation_function
snake_case_ = init_std
snake_case_ = use_cache
# Autoformer
snake_case_ = label_length
snake_case_ = moving_average
snake_case_ = autocorrelation_factor
super().__init__(is_encoder_decoder=UpperCAmelCase_ , **UpperCAmelCase_ )
@property
def lowerCAmelCase ( self : str ) ->int:
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
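# Worked example (added comment): with the defaults above there are no static
# or dynamic features, so cardinality collapses to [0] and embedding_dimension
# to [0]; then _number_of_features = 0 + 0 + 0 + 0 + 1 * 2 = 2 and the
# model's feature_size = input_size * len(lags_sequence) + 2 = 1 * 7 + 2 = 9.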
| 347 |
"""simple docstring"""
from __future__ import annotations
def print_distance(distance : list[float] , src : int ) -> None:
    print(f"""Vertex\tShortest Distance from vertex {src}""" )
    for i, d in enumerate(distance ):
        print(f"""{i}\t\t{d}""" )
def check_negative_cycle(graph : list[dict[str, int]] , distance : list[float] , edge_count : int ) -> bool:
    for j in range(edge_count ):
        u , v , w = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
        if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford(graph : list[dict[str, int]] , vertex_count : int , edge_count : int , src : int ) -> list[float]:
    distance = [float("""inf""" )] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1 ):
        for j in range(edge_count ):
            u , v , w = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
            if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph , distance , edge_count )
    if negative_cycle_exists:
        raise Exception("""Negative cycle found""" )
    return distance
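# Minimal self-check of bellman_ford above (added): in this three-vertex graph
# the direct edge 0 -> 2 (weight 4) loses to the path 0 -> 1 -> 2
# (weight 1 + 2 = 3).
assert bellman_ford(
    [
        {"""src""": 0, """dst""": 1, """weight""": 1},
        {"""src""": 1, """dst""": 2, """weight""": 2},
        {"""src""": 0, """dst""": 2, """weight""": 4},
    ],
    3,
    3,
    0,
) == [0.0, 1.0, 3.0]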
if __name__ == "__main__":
import doctest
doctest.testmod()
    V = int(input('Enter number of vertices: ').strip())
    E = int(input('Enter number of edges: ').strip())
    graph: list[dict[str, int]] = [{} for _ in range(E)]
    for i in range(E):
        print('Edge ', i + 1)
        src, dest, weight = (
            int(x)
            for x in input('Enter source, destination, weight: ').strip().split(' ')
        )
        graph[i] = {'src': src, 'dst': dest, 'weight': weight}
    source = int(input('\nEnter shortest path source:').strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 347 | 1 |
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
    from transformers.models.layoutlmv3.modeling_layoutlmv3 import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester :
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : Any=3 , UpperCAmelCase_ : Dict=4 , UpperCAmelCase_ : Optional[Any]=2 , UpperCAmelCase_ : Optional[int]=7 , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Optional[int]=99 , UpperCAmelCase_ : Tuple=36 , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : Tuple=37 , UpperCAmelCase_ : Optional[Any]="gelu" , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : List[Any]=512 , UpperCAmelCase_ : List[Any]=16 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Dict=0.02 , UpperCAmelCase_ : Any=6 , UpperCAmelCase_ : int=6 , UpperCAmelCase_ : Tuple=3 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Optional[Any]=1_000 , ) ->int:
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = num_channels
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = text_seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = coordinate_size
snake_case_ = shape_size
snake_case_ = num_labels
snake_case_ = num_choices
snake_case_ = scope
snake_case_ = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
snake_case_ = text_seq_length
snake_case_ = (image_size // patch_size) ** 2 + 1
snake_case_ = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs( self : List[Any] ) ->str:
"""simple docstring"""
snake_case_ = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
snake_case_ = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
snake_case_ = bbox[i, j, 3]
snake_case_ = bbox[i, j, 1]
snake_case_ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
snake_case_ = bbox[i, j, 2]
snake_case_ = bbox[i, j, 0]
snake_case_ = t
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = None
if self.use_input_mask:
snake_case_ = random_attention_mask([self.batch_size, self.text_seq_length] )
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
snake_case_ = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = LayoutLMvaModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
# text + image
snake_case_ = model(UpperCAmelCase_ , pixel_values=UpperCAmelCase_ )
snake_case_ = model(
UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
snake_case_ = model(pixel_values=UpperCAmelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] ) ->List[str]:
"""simple docstring"""
snake_case_ = self.num_labels
snake_case_ = LayoutLMvaForSequenceClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(
UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple ) ->Dict:
"""simple docstring"""
snake_case_ = self.num_labels
snake_case_ = LayoutLMvaForTokenClassification(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(
UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def lowerCAmelCase ( self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict ) ->Dict:
"""simple docstring"""
snake_case_ = LayoutLMvaForQuestionAnswering(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(
UpperCAmelCase_ , bbox=UpperCAmelCase_ , pixel_values=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self : str ) ->Union[str, Any]:
        """Prepare the config and a dict of model inputs for the common tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """bbox""": bbox,
            """pixel_values""": pixel_values,
            """token_type_ids""": token_type_ids,
            """attention_mask""": input_mask,
        }
        return config, inputs_dict
@require_torch
class __A (snake_case__ , snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Optional[Any] = False
__lowercase: str = False
__lowercase: Any = False
__lowercase: Union[str, Any] = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
__lowercase: Any = (
{"""document-question-answering""": LayoutLMvaForQuestionAnswering, """feature-extraction""": LayoutLMvaModel}
if is_torch_available()
else {}
)
def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] ) ->Optional[int]:
"""simple docstring"""
return True
def lowerCAmelCase ( self : Tuple ) ->Optional[int]:
"""simple docstring"""
snake_case_ = LayoutLMvaModelTester(self )
snake_case_ = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int=False ) ->List[str]:
"""simple docstring"""
snake_case_ = copy.deepcopy(UpperCAmelCase_ )
if model_class in get_values(UpperCAmelCase_ ):
snake_case_ = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(UpperCAmelCase_ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(UpperCAmelCase_ ):
snake_case_ = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_ )
elif model_class in get_values(UpperCAmelCase_ ):
snake_case_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_ )
snake_case_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_ )
elif model_class in [
*get_values(UpperCAmelCase_ ),
]:
snake_case_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_ )
elif model_class in [
*get_values(UpperCAmelCase_ ),
]:
snake_case_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=UpperCAmelCase_ , )
return inputs_dict
def lowerCAmelCase ( self : Dict ) ->int:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : str ) ->List[Any]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowerCAmelCase ( self : List[Any] ) ->List[str]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case_ = type
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowerCAmelCase ( self : Dict ) ->List[str]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase_ )
def lowerCAmelCase ( self : Any ) ->Optional[int]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )
def lowerCAmelCase ( self : Tuple ) ->str:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ )
@slow
def lowerCAmelCase ( self : Optional[int] ) ->Tuple:
"""simple docstring"""
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = LayoutLMvaModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
def _a ( ) -> Optional[Any]:
snake_case_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class __A (unittest.TestCase):
'''simple docstring'''
@cached_property
def lowerCAmelCase ( self : int ) ->int:
"""simple docstring"""
return LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase_ ) if is_vision_available() else None
@slow
def lowerCAmelCase ( self : List[str] ) ->int:
"""simple docstring"""
snake_case_ = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(UpperCAmelCase_ )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(images=UpperCAmelCase_ , return_tensors="""pt""" ).pixel_values.to(UpperCAmelCase_ )
snake_case_ = torch.tensor([[1, 2]] )
snake_case_ = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
snake_case_ = model(
input_ids=input_ids.to(UpperCAmelCase_ ) , bbox=bbox.to(UpperCAmelCase_ ) , pixel_values=pixel_values.to(UpperCAmelCase_ ) , )
# verify the logits
snake_case_ = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase_ )
snake_case_ = torch.tensor(
[[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]] ).to(UpperCAmelCase_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
| 347 |
"""simple docstring"""
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE
def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" )
    parser.add_argument(
        """--pretrained_model_config""" , type=str , default="""roberta-base""" , help="""The model config to use. Note that we don't copy the model's weights, only the config!""" , )
    parser.add_argument(
        """--tokenizer""" , type=str , default="""unigram-tokenizer-wikitext""" , help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""" , )
    parser.add_argument(
        """--per_replica_batch_size""" , type=int , default=8 , help="""Batch size per TPU core.""" , )
    parser.add_argument(
        """--no_tpu""" , action="""store_true""" , help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""" , )
    parser.add_argument(
        """--tpu_name""" , type=str , help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""" , default="""local""" , )
    parser.add_argument(
        """--tpu_zone""" , type=str , help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" , )
    parser.add_argument(
        """--gcp_project""" , type=str , help="""Google cloud project name. Only used for non-Colab TPU nodes.""" )
    parser.add_argument(
        """--bfloat16""" , action="""store_true""" , help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""" , )
    parser.add_argument(
        """--train_dataset""" , type=str , help="""Path to training dataset to load. If the path begins with `gs://`"""
        """ then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
    parser.add_argument(
        """--shuffle_buffer_size""" , type=int , default=2**18 , help="""Size of the shuffle buffer (in samples)""" , )
    parser.add_argument(
        """--eval_dataset""" , type=str , help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
        """ then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
    parser.add_argument(
        """--num_epochs""" , type=int , default=1 , help="""Number of epochs to train for.""" , )
    parser.add_argument(
        """--learning_rate""" , type=float , default=1E-4 , help="""Learning rate to use for training.""" , )
    parser.add_argument(
        """--weight_decay_rate""" , type=float , default=1E-3 , help="""Weight decay rate to use for training.""" , )
    parser.add_argument(
        """--max_length""" , type=int , default=512 , help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""" , )
    parser.add_argument(
        """--mlm_probability""" , type=float , default=0.15 , help="""Fraction of tokens to mask during training.""" , )
    parser.add_argument("""--output_dir""" , type=str , required=True , help="""Path to save model checkpoints to.""" )
    parser.add_argument("""--hub_model_id""" , type=str , help="""Model ID to upload to on the Hugging Face Hub.""" )
    args = parser.parse_args()
    return args
def initialize_tpu(args ):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            """Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """
            """--gcp_project. When running on a TPU VM, use --tpu_name local.""" )
    tf.config.experimental_connect_to_cluster(tpu )
    tf.tpu.experimental.initialize_tpu_system(tpu )
    return tpu
def count_samples(file_list ):
    num_samples = 0
    for file in file_list:
        filename = file.split("""/""" )[-1]
        sample_count = re.search(r"""-\d+-(\d+)\.tfrecord""" , filename ).group(1 )
        num_samples += int(sample_count )
    return num_samples
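# Example (added): a shard named "wikitext-00001-2048.tfrecord" contributes
# 2048 samples, because the trailing number in each shard's filename records
# how many examples that shard holds.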
def prepare_dataset(records , decode_fn , mask_fn , batch_size , shuffle , shuffle_buffer_size=None ):
    num_samples = count_samples(records )
    dataset = tf.data.Dataset.from_tensor_slices(records )
    if shuffle:
        dataset = dataset.shuffle(len(records ) )
    dataset = tf.data.TFRecordDataset(dataset , num_parallel_reads=AUTO )
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples ) )
    dataset = dataset.map(decode_fn , num_parallel_calls=AUTO )
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size )
    dataset = dataset.batch(batch_size , drop_remainder=True )
    dataset = dataset.map(mask_fn , num_parallel_calls=AUTO )
    dataset = dataset.prefetch(AUTO )
    return dataset
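# Pipeline order note (added comment): shards -> parse -> shuffle -> batch ->
# mask -> prefetch. Masking after batching lets the collator's TF-compiled
# masking run on whole batches instead of one example at a time.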
def main(args ):
    if not args.no_tpu:
        tpu = initialize_tpu(args )
        strategy = tf.distribute.TPUStrategy(tpu )
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" )
    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer )
    config = AutoConfig.from_pretrained(args.pretrained_model_config )
    config.vocab_size = tokenizer.vocab_size
    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) )
    if not training_records:
        raise ValueError(f"""No .tfrecord files found in {args.train_dataset}.""" )
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) )
    if not eval_records:
        raise ValueError(f"""No .tfrecord files found in {args.eval_dataset}.""" )
    num_train_samples = count_samples(training_records )
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs
    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config )
        model(model.dummy_inputs )  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer , schedule = create_optimizer(
            num_train_steps=total_train_steps , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer , metrics=["""accuracy"""] )
    def decode_fn(example ):
        features = {
            """input_ids""": tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
            """attention_mask""": tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
        }
        return tf.io.parse_single_example(example , features )
    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer , mlm_probability=args.mlm_probability , mlm=True , return_tensors="""tf""" )
    def mask_with_collator(batch ):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["""attention_mask"""] , tf.bool )
            | (batch["""input_ids"""] == tokenizer.cls_token_id)
            | (batch["""input_ids"""] == tokenizer.sep_token_id)
        )
        batch["""input_ids"""] , batch["""labels"""] = data_collator.tf_mask_tokens(
            batch["""input_ids"""] , vocab_size=len(tokenizer ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=special_tokens_mask , )
        return batch
    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset = prepare_dataset(
        training_records , decode_fn=decode_fn , mask_fn=mask_with_collator , batch_size=batch_size , shuffle=True , shuffle_buffer_size=args.shuffle_buffer_size , )
    eval_dataset = prepare_dataset(
        eval_records , decode_fn=decode_fn , mask_fn=mask_with_collator , batch_size=batch_size , shuffle=False , )
    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=tokenizer ) )
    model.fit(
        train_dataset , validation_data=eval_dataset , epochs=args.num_epochs , callbacks=callbacks , )
    model.save_pretrained(args.output_dir )
if __name__ == "__main__":
    args = parse_args()
main(args)
| 347 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
__SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : str = {
'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json',
}
class __A (snake_case__):
'''simple docstring'''
__lowercase: Dict = """layoutlmv3"""
def __init__( self : Optional[int] , UpperCAmelCase_ : Optional[int]=50_265 , UpperCAmelCase_ : Union[str, Any]=768 , UpperCAmelCase_ : List[str]=12 , UpperCAmelCase_ : str=12 , UpperCAmelCase_ : Optional[int]=3_072 , UpperCAmelCase_ : Tuple="gelu" , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : List[Any]=512 , UpperCAmelCase_ : Optional[int]=2 , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : Optional[int]=1E-5 , UpperCAmelCase_ : int=1 , UpperCAmelCase_ : List[str]=0 , UpperCAmelCase_ : List[Any]=2 , UpperCAmelCase_ : str=1_024 , UpperCAmelCase_ : List[Any]=128 , UpperCAmelCase_ : Dict=128 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Optional[int]=32 , UpperCAmelCase_ : Dict=128 , UpperCAmelCase_ : List[str]=64 , UpperCAmelCase_ : List[str]=256 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Dict=224 , UpperCAmelCase_ : Optional[int]=3 , UpperCAmelCase_ : Optional[int]=16 , UpperCAmelCase_ : Optional[int]=None , **UpperCAmelCase_ : Any , ) ->List[str]:
"""simple docstring"""
super().__init__(
vocab_size=UpperCAmelCase_ , hidden_size=UpperCAmelCase_ , num_hidden_layers=UpperCAmelCase_ , num_attention_heads=UpperCAmelCase_ , intermediate_size=UpperCAmelCase_ , hidden_act=UpperCAmelCase_ , hidden_dropout_prob=UpperCAmelCase_ , attention_probs_dropout_prob=UpperCAmelCase_ , max_position_embeddings=UpperCAmelCase_ , type_vocab_size=UpperCAmelCase_ , initializer_range=UpperCAmelCase_ , layer_norm_eps=UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ , )
snake_case_ = max_ad_position_embeddings
snake_case_ = coordinate_size
snake_case_ = shape_size
snake_case_ = has_relative_attention_bias
snake_case_ = rel_pos_bins
snake_case_ = max_rel_pos
snake_case_ = has_spatial_attention_bias
snake_case_ = rel_ad_pos_bins
snake_case_ = max_rel_ad_pos
snake_case_ = text_embed
snake_case_ = visual_embed
snake_case_ = input_size
snake_case_ = num_channels
snake_case_ = patch_size
snake_case_ = classifier_dropout
class __A (snake_case__):
'''simple docstring'''
__lowercase: Optional[int] = version.parse("""1.12""")
@property
def lowerCAmelCase ( self : Tuple ) ->Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
else:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""bbox""", {0: """batch""", 1: """sequence"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels"""}),
] )
@property
def lowerCAmelCase ( self : str ) ->float:
"""simple docstring"""
return 1E-5
@property
def lowerCAmelCase ( self : Tuple ) ->int:
"""simple docstring"""
return 12
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : "ProcessorMixin" , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional["TensorType"] = None , UpperCAmelCase_ : int = 3 , UpperCAmelCase_ : int = 40 , UpperCAmelCase_ : int = 40 , ) ->Mapping[str, Any]:
"""simple docstring"""
setattr(processor.image_processor , """apply_ocr""" , UpperCAmelCase_ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
snake_case_ = compute_effective_axis_dimension(
UpperCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
snake_case_ = processor.tokenizer.num_special_tokens_to_add(UpperCAmelCase_ )
snake_case_ = compute_effective_axis_dimension(
UpperCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCAmelCase_ )
# Generate dummy inputs according to compute batch and sequence
snake_case_ = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
snake_case_ = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
snake_case_ = self._generate_dummy_images(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = dict(
processor(
UpperCAmelCase_ , text=UpperCAmelCase_ , boxes=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , ) )
return inputs
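# Summary of the export path above (added comment): the dummy batch is
# batch_size copies of a seq_length-long run of <unk> tokens, one fixed
# bounding box per sequence, and blank num_channels x image_height x
# image_width images, all routed through the processor so the exported ONNX
# graph sees the same tensor layout it will receive at inference time.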
| 347 |
"""simple docstring"""
def speed_of_sound_in_a_fluid(density : float , bulk_modulus : float ) -> float:
if density <= 0:
raise ValueError("""Impossible fluid density""" )
if bulk_modulus <= 0:
raise ValueError("""Impossible bulk modulus""" )
return (bulk_modulus / density) ** 0.5
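# Rough sanity check (added): for water, bulk_modulus ~ 2.15e9 Pa and
# density ~ 998 kg/m^3 give c = sqrt(K / rho) ~ 1.47e3 m/s, close to the
# commonly quoted ~1480 m/s.
assert abs(speed_of_sound_in_a_fluid(998 , 2.15E9 ) - 1_467.75 ) < 1.0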
if __name__ == "__main__":
import doctest
doctest.testmod()
| 347 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__SCREAMING_SNAKE_CASE : Tuple = {
'configuration_mobilenet_v2': [
'MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileNetV2Config',
'MobileNetV2OnnxConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : List[Any] = ['MobileNetV2FeatureExtractor']
__SCREAMING_SNAKE_CASE : List[str] = ['MobileNetV2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [
'MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileNetV2ForImageClassification',
'MobileNetV2ForSemanticSegmentation',
'MobileNetV2Model',
'MobileNetV2PreTrainedModel',
'load_tf_weights_in_mobilenet_v2',
]
if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
import sys
__SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 347 |
"""simple docstring"""
def is_palindrome(num : int ) -> bool:
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
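# Quick checks (added): 121 reads the same in both directions, 123 does not,
# and negative numbers are rejected because of the leading minus sign.
assert is_palindrome(121 ) is True
assert is_palindrome(123 ) is False
assert is_palindrome(-121 ) is False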
if __name__ == "__main__":
import doctest
doctest.testmod()
| 347 | 1 |
"""simple docstring"""
def binary_insertion_sort(collection : list ) -> list:
    n = len(collection )
    for i in range(1 , n ):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i , low , -1 ):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
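# Example (added): binary search locates the insertion point for each element,
# then everything to its right is shifted one slot before the value drops in.
assert binary_insertion_sort([5, 2, 4, 6, 1, 3] ) == [1, 2, 3, 4, 5, 6]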
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(binary_insertion_sort(unsorted))
| 347 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class __A (snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Tuple = SpeechTaTokenizer
__lowercase: int = False
__lowercase: List[str] = True
def lowerCAmelCase ( self : Any ) ->str:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = SpeechTaTokenizer(SAMPLE_VOCAB )
        mask_token = AddedToken("""<mask>""" , lstrip=True , rstrip=False )
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"""mask_token""": mask_token} )
        tokenizer.add_tokens(["""<ctc_blank>"""] )
        tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) ->Dict:
"""simple docstring"""
snake_case_ = """this is a test"""
snake_case_ = """this is a test"""
return input_text, output_text
def lowerCAmelCase ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Tuple=20 , UpperCAmelCase_ : Dict=5 ) ->List[Any]:
"""simple docstring"""
snake_case_ , snake_case_ = self.get_input_output_texts(UpperCAmelCase_ )
snake_case_ = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
snake_case_ = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ )
return text, ids
def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = """<pad>"""
snake_case_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) , UpperCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) , UpperCAmelCase_ )
def lowerCAmelCase ( self : int ) ->str:
"""simple docstring"""
snake_case_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-4] , """œ""" )
self.assertEqual(vocab_keys[-2] , """<mask>""" )
self.assertEqual(vocab_keys[-1] , """<ctc_blank>""" )
self.assertEqual(len(UpperCAmelCase_ ) , 81 )
def lowerCAmelCase ( self : Optional[int] ) ->int:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
"""simple docstring"""
snake_case_ = self.get_tokenizers(do_lower_case=UpperCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
snake_case_ = tokenizer.vocab_size
snake_case_ = len(UpperCAmelCase_ )
self.assertNotEqual(UpperCAmelCase_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
snake_case_ = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
snake_case_ = tokenizer.add_tokens(UpperCAmelCase_ )
snake_case_ = tokenizer.vocab_size
snake_case_ = len(UpperCAmelCase_ )
self.assertNotEqual(UpperCAmelCase_ , 0 )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , len(UpperCAmelCase_ ) )
self.assertEqual(UpperCAmelCase_ , all_size + len(UpperCAmelCase_ ) )
snake_case_ = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=UpperCAmelCase_ )
self.assertGreaterEqual(len(UpperCAmelCase_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
snake_case_ = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
snake_case_ = tokenizer.add_special_tokens(UpperCAmelCase_ )
snake_case_ = tokenizer.vocab_size
snake_case_ = len(UpperCAmelCase_ )
self.assertNotEqual(UpperCAmelCase_ , 0 )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , len(UpperCAmelCase_ ) )
self.assertEqual(UpperCAmelCase_ , all_size_a + len(UpperCAmelCase_ ) )
snake_case_ = tokenizer.encode(
""">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=UpperCAmelCase_ )
self.assertGreaterEqual(len(UpperCAmelCase_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
pass
def lowerCAmelCase ( self : List[str] ) ->Optional[Any]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : List[str] ) ->List[str]:
"""simple docstring"""
snake_case_ = self.get_tokenizer()
snake_case_ = tokenizer.tokenize("""This is a test""" )
# fmt: off
self.assertListEqual(UpperCAmelCase_ , [SPIECE_UNDERLINE, """T""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """a""", SPIECE_UNDERLINE, """t""", """e""", """s""", """t"""] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
snake_case_ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """92000""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
snake_case_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ )
# fmt: off
self.assertListEqual(UpperCAmelCase_ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
snake_case_ = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ )
self.assertListEqual(
UpperCAmelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """<unk>""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
@slow
def lowerCAmelCase ( self : str ) ->Dict:
"""simple docstring"""
snake_case_ = [
"""Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """
"""general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural """
"""Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """
"""models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""",
"""BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """
"""conditioning on both left and right context in all layers.""",
"""The quick brown fox jumps over the lazy dog.""",
]
# fmt: off
snake_case_ = {
"""input_ids""": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ , model_name="""microsoft/speecht5_asr""" , revision="""c5ef64c71905caeccde0e4462ef3f9077224c524""" , sequences=UpperCAmelCase_ , )
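# tokenizer_integration_test_util re-encodes `sequences` with the tokenizer loaded from
# the pinned revision above and checks the result against `expected_encoding`.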
| 347 | 1 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def _a ( _SCREAMING_SNAKE_CASE ) -> list[list[float]]:
snake_case_ = Decimal
    # Check if the provided matrix has 2 rows and 2 columns,
    # since this branch handles only 2x2 matrices
if len(_SCREAMING_SNAKE_CASE ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
snake_case_ = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError("""This matrix has no inverse.""" )
# Creates a copy of the matrix with swapped positions of the elements
snake_case_ = [[0.0, 0.0], [0.0, 0.0]]
snake_case_ , snake_case_ = matrix[1][1], matrix[0][0]
snake_case_ , snake_case_ = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(_SCREAMING_SNAKE_CASE ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(_SCREAMING_SNAKE_CASE ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
        # Calculate the determinant of the matrix using Sarrus' rule
snake_case_ = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError("""This matrix has no inverse.""" )
# Creating cofactor matrix
snake_case_ = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
snake_case_ = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
snake_case_ = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
snake_case_ = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
snake_case_ = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
snake_case_ = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
snake_case_ = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
snake_case_ = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
snake_case_ = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
snake_case_ = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
snake_case_ = array(_SCREAMING_SNAKE_CASE )
for i in range(3 ):
for j in range(3 ):
snake_case_ = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
snake_case_ = array(_SCREAMING_SNAKE_CASE )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(_SCREAMING_SNAKE_CASE )
# Calculate the inverse of the matrix
return [[float(d(_SCREAMING_SNAKE_CASE ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError("""Please provide a matrix of size 2x2 or 3x3.""" )
| 347 |
"""simple docstring"""
import datasets
__SCREAMING_SNAKE_CASE : Tuple = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n'
__SCREAMING_SNAKE_CASE : Dict = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'
__SCREAMING_SNAKE_CASE : List[str] = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n'
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __A (datasets.Metric):
'''simple docstring'''
def lowerCAmelCase ( self : str ) ->Any:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any ) ->int:
"""simple docstring"""
return {"accuracy": simple_accuracy(UpperCAmelCase_ , UpperCAmelCase_ )}
| 347 | 1 |
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE ) -> list[int]:
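    # Z-array semantics: entry i holds the length of the longest common prefix of the
    # whole string and its suffix starting at index i (entry 0 stays 0 in this version).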
snake_case_ = [0 for i in range(len(_SCREAMING_SNAKE_CASE ) )]
# initialize interval's left pointer and right pointer
snake_case_ , snake_case_ = 0, 0
for i in range(1 , len(_SCREAMING_SNAKE_CASE ) ):
# case when current index is inside the interval
if i <= right_pointer:
snake_case_ = min(right_pointer - i + 1 , z_result[i - left_pointer] )
snake_case_ = min_edge
while go_next(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
z_result[i] += 1
        # if the new index's result extends the interval further right,
        # we have to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
snake_case_ , snake_case_ = i, i + z_result[i] - 1
return z_result
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> bool:
return i + z_result[i] < len(_SCREAMING_SNAKE_CASE ) and s[z_result[i]] == s[i + z_result[i]]
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
snake_case_ = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
snake_case_ = z_function(pattern + input_str )
for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index is the starting position of a substring
        # equal to the pattern string
if val >= len(_SCREAMING_SNAKE_CASE ):
answer += 1
return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
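    # For reference (assumed input): the z-array of "aabxaab" is [0, 1, 0, 0, 3, 1, 0].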
| 347 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
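# Each placeholder class below raises a clear error via requires_backends when the
# sentencepiece backend is missing, instead of failing with an opaque ImportError.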
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: List[Any] = ["""sentencepiece"""]
def __init__( self : int , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[str] ) ->List[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Optional[int] = ["""sentencepiece"""]
def __init__( self : Optional[int] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Tuple ) ->Dict:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Any = ["""sentencepiece"""]
def __init__( self : Any , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ) ->List[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Dict = ["""sentencepiece"""]
def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int ) ->Optional[int]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: List[str] = ["""sentencepiece"""]
def __init__( self : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ) ->List[str]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict ) ->Optional[int]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : int , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Union[str, Any] ) ->Dict:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Optional[Any] = ["""sentencepiece"""]
def __init__( self : List[str] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Optional[Any] ) ->Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Optional[int] = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[Any] ) ->Tuple:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Any = ["""sentencepiece"""]
def __init__( self : List[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : int ) ->List[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Any = ["""sentencepiece"""]
def __init__( self : int , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[Any] ) ->Tuple:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : Optional[int] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Tuple = ["""sentencepiece"""]
def __init__( self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ) ->Dict:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Union[str, Any] = ["""sentencepiece"""]
def __init__( self : List[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[Any] ) ->Optional[int]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : int , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[str] ) ->Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Tuple = ["""sentencepiece"""]
def __init__( self : int , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Optional[Any] ) ->str:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: List[Any] = ["""sentencepiece"""]
def __init__( self : Dict , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[Any] ) ->int:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Union[str, Any] = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int] ) ->Optional[int]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Any = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict ) ->Dict:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Any = ["""sentencepiece"""]
def __init__( self : List[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[str] ) ->List[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[Any] ) ->List[str]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[Any] ) ->Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: List[Any] = ["""sentencepiece"""]
def __init__( self : Any , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[Any] ) ->List[str]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Dict = ["""sentencepiece"""]
def __init__( self : int , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Union[str, Any] ) ->List[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: List[Any] = ["""sentencepiece"""]
def __init__( self : List[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Union[str, Any] = ["""sentencepiece"""]
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : str ) ->Optional[int]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Optional[int] = ["""sentencepiece"""]
def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[int] ) ->Dict:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Dict = ["""sentencepiece"""]
def __init__( self : Optional[int] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[str] ) ->Optional[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int] ) ->Any:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: List[str] = ["""sentencepiece"""]
def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Any = ["""sentencepiece"""]
def __init__( self : List[str] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int] ) ->str:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
| 347 | 1 |
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> np.ndarray:
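    # Positional parameters (names inferred from the formula below, not from the
    # original signature): ksize, sigma, theta (degrees), lambd, gamma, psi.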
# prepare kernel
    # the kernel size has to be odd
if (ksize % 2) == 0:
snake_case_ = ksize + 1
snake_case_ = np.zeros((ksize, ksize) , dtype=np.floataa )
    # compute each kernel value
for y in range(_SCREAMING_SNAKE_CASE ):
for x in range(_SCREAMING_SNAKE_CASE ):
# distance from center
snake_case_ = x - ksize // 2
snake_case_ = y - ksize // 2
            # degrees to radians
snake_case_ = theta / 180 * np.pi
snake_case_ = np.cos(_theta )
snake_case_ = np.sin(_theta )
# get kernel x
snake_case_ = cos_theta * px + sin_theta * py
# get kernel y
snake_case_ = -sin_theta * px + cos_theta * py
# fill kernel
snake_case_ = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
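# Shape note: the code above bumps an even kernel size to the next odd value, so a
# requested size of 10 yields an (11, 11) kernel.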
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
__SCREAMING_SNAKE_CASE : int = imread('../image_data/lena.jpg')
    # convert the image to grayscale
__SCREAMING_SNAKE_CASE : Optional[Any] = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple kernels to detect edges
__SCREAMING_SNAKE_CASE : Optional[Any] = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
__SCREAMING_SNAKE_CASE : Optional[Any] = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
__SCREAMING_SNAKE_CASE : Any = out / out.max() * 255
__SCREAMING_SNAKE_CASE : Any = out.astype(np.uinta)
imshow('Original', gray)
    imshow('Gabor filter with 11x11 mask and 6 directions', out)
waitKey(0)
| 347 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
__SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
class __A (snake_case__):
'''simple docstring'''
def __init__( self : str , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int ) ->None:
"""simple docstring"""
warnings.warn(
"""The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use MobileViTImageProcessor instead.""" , UpperCAmelCase_ , )
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
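# Deprecation shim: apart from emitting the warning above, the class forwards all
# arguments unchanged to MobileViTImageProcessor.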
| 347 | 1 |
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def _a ( _SCREAMING_SNAKE_CASE ) -> Dict:
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class __A (snake_case__):
'''simple docstring'''
@staticmethod
def lowerCAmelCase ( UpperCAmelCase_ : ArgumentParser ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = parser.add_parser("""download""" )
download_parser.add_argument(
"""--cache-dir""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help="""Path to location to store the models""" )
download_parser.add_argument(
"""--force""" , action="""store_true""" , help="""Force the model to be download even if already in cache-dir""" )
download_parser.add_argument(
"""--trust-remote-code""" , action="""store_true""" , help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine""" , )
download_parser.add_argument("""model""" , type=UpperCAmelCase_ , help="""Name of the model to download""" )
download_parser.set_defaults(func=UpperCAmelCase_ )
def __init__( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : bool , UpperCAmelCase_ : bool ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = model
snake_case_ = cache
snake_case_ = force
snake_case_ = trust_remote_code
def lowerCAmelCase ( self : Any ) ->List[Any]:
"""simple docstring"""
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
| 347 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Any:
snake_case_ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
snake_case_ = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
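# rename_keys is a list of (timm_name, hf_name) pairs; each pair is consumed by the
# rename_key helper further below to move weights into the HF naming scheme.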
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Tuple:
for i in range(config.num_hidden_layers ):
if base_model:
snake_case_ = """"""
else:
snake_case_ = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case_ = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
snake_case_ = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
snake_case_ = in_proj_weight[
: config.hidden_size, :
]
snake_case_ = in_proj_bias[: config.hidden_size]
snake_case_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case_ = in_proj_weight[
-config.hidden_size :, :
]
snake_case_ = in_proj_bias[-config.hidden_size :]
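# The fused qkv projection has shape (3 * hidden_size, hidden_size); the slices above
# split it row-wise into query, key and value blocks, in that order.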
def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]:
snake_case_ = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
snake_case_ = dct.pop(_SCREAMING_SNAKE_CASE )
snake_case_ = val
def _a ( ) -> Any:
snake_case_ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case_ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
snake_case_ = ViTConfig()
snake_case_ = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
snake_case_ = True
snake_case_ = int(vit_name[-12:-10] )
snake_case_ = int(vit_name[-9:-6] )
else:
snake_case_ = 1_000
snake_case_ = """huggingface/label-files"""
snake_case_ = """imagenet-1k-id2label.json"""
snake_case_ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) )
snake_case_ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
snake_case_ = idalabel
snake_case_ = {v: k for k, v in idalabel.items()}
snake_case_ = int(vit_name[-6:-4] )
snake_case_ = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("""tiny""" ):
snake_case_ = 192
snake_case_ = 768
snake_case_ = 12
snake_case_ = 3
elif vit_name[9:].startswith("""small""" ):
snake_case_ = 384
snake_case_ = 1_536
snake_case_ = 12
snake_case_ = 6
else:
pass
else:
if vit_name[4:].startswith("""small""" ):
snake_case_ = 768
snake_case_ = 2_304
snake_case_ = 8
snake_case_ = 8
elif vit_name[4:].startswith("""base""" ):
pass
elif vit_name[4:].startswith("""large""" ):
snake_case_ = 1_024
snake_case_ = 4_096
snake_case_ = 24
snake_case_ = 16
elif vit_name[4:].startswith("""huge""" ):
snake_case_ = 1_280
snake_case_ = 5_120
snake_case_ = 32
snake_case_ = 16
# load original model from timm
snake_case_ = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
snake_case_ = timm_model.state_dict()
if base_model:
remove_classification_head_(_SCREAMING_SNAKE_CASE )
snake_case_ = create_rename_keys(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
read_in_q_k_v(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# load HuggingFace model
if vit_name[-5:] == "in21k":
snake_case_ = ViTModel(_SCREAMING_SNAKE_CASE ).eval()
else:
snake_case_ = ViTForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
model.load_state_dict(_SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
snake_case_ = DeiTImageProcessor(size=config.image_size )
else:
snake_case_ = ViTImageProcessor(size=config.image_size )
snake_case_ = image_processor(images=prepare_img() , return_tensors="""pt""" )
snake_case_ = encoding["""pixel_values"""]
snake_case_ = model(_SCREAMING_SNAKE_CASE )
if base_model:
snake_case_ = timm_model.forward_features(_SCREAMING_SNAKE_CASE )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_SCREAMING_SNAKE_CASE , outputs.pooler_output , atol=1E-3 )
else:
snake_case_ = timm_model(_SCREAMING_SNAKE_CASE )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_SCREAMING_SNAKE_CASE , outputs.logits , atol=1E-3 )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
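    # Hedged invocation sketch (the script filename is an assumption; the flags are
    # taken from the parser above):
    #   python convert_vit_timm_to_pytorch.py --vit_name vit_base_patch16_224 \
    #       --pytorch_dump_folder_path ./vit-base-patch16-224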
| 347 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : List[Any] = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class __A (snake_case__):
'''simple docstring'''
__lowercase: Union[str, Any] = """glpn"""
def __init__( self : List[Any] , UpperCAmelCase_ : List[Any]=3 , UpperCAmelCase_ : List[str]=4 , UpperCAmelCase_ : List[str]=[2, 2, 2, 2] , UpperCAmelCase_ : List[Any]=[8, 4, 2, 1] , UpperCAmelCase_ : Optional[Any]=[32, 64, 160, 256] , UpperCAmelCase_ : Optional[int]=[7, 3, 3, 3] , UpperCAmelCase_ : Union[str, Any]=[4, 2, 2, 2] , UpperCAmelCase_ : Any=[1, 2, 5, 8] , UpperCAmelCase_ : Optional[Any]=[4, 4, 4, 4] , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : Any=0.0 , UpperCAmelCase_ : List[Any]=0.0 , UpperCAmelCase_ : Dict=0.02 , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : Optional[int]=1E-6 , UpperCAmelCase_ : List[str]=64 , UpperCAmelCase_ : Optional[int]=10 , UpperCAmelCase_ : Optional[int]=-1 , **UpperCAmelCase_ : List[str] , ) ->str:
"""simple docstring"""
super().__init__(**UpperCAmelCase_ )
snake_case_ = num_channels
snake_case_ = num_encoder_blocks
snake_case_ = depths
snake_case_ = sr_ratios
snake_case_ = hidden_sizes
snake_case_ = patch_sizes
snake_case_ = strides
snake_case_ = mlp_ratios
snake_case_ = num_attention_heads
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = initializer_range
snake_case_ = drop_path_rate
snake_case_ = layer_norm_eps
snake_case_ = decoder_hidden_size
snake_case_ = max_depth
snake_case_ = head_in_index
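# Construction sketch (hedged): the defaults above describe a 4-stage hierarchical
# encoder with depths [2, 2, 2, 2] and hidden sizes [32, 64, 160, 256].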
| 347 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class __A (unittest.TestCase):
'''simple docstring'''
def __init__( self : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple=13 , UpperCAmelCase_ : List[Any]=7 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Dict=99 , UpperCAmelCase_ : str=32 , UpperCAmelCase_ : Tuple=5 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : Any=37 , UpperCAmelCase_ : int="gelu" , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Dict=512 , UpperCAmelCase_ : Optional[Any]=16 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : str=4 , ) ->Tuple:
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_attention_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_choices
def lowerCAmelCase ( self : Optional[int] ) ->str:
"""simple docstring"""
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_attention_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase ( self : List[str] ) ->Dict:
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class __A (snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Union[str, Any] = True
__lowercase: int = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
snake_case_ = FlaxRoFormerModelTester(self )
@slow
def lowerCAmelCase ( self : Any ) ->List[str]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
snake_case_ = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=UpperCAmelCase_ )
snake_case_ = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase_ )
@require_flax
class __A (unittest.TestCase):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : str ) ->Dict:
"""simple docstring"""
snake_case_ = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
snake_case_ = jnp.array([[0, 1, 2, 3, 4, 5]] )
snake_case_ = model(UpperCAmelCase_ )[0]
snake_case_ = 50_000
snake_case_ = (1, 6, vocab_size)
self.assertEqual(output.shape , UpperCAmelCase_ )
snake_case_ = jnp.array(
[[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
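# The 3x3 slice comparison above is the usual pattern for pinning model outputs:
# only a small corner of the logits is checked, within a loose 1e-4 tolerance.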
| 347 | 1 |
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def _a ( _SCREAMING_SNAKE_CASE ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(_SCREAMING_SNAKE_CASE ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _a ( ) -> Iterator[int]:
snake_case_ = 2
while True:
if is_prime(_SCREAMING_SNAKE_CASE ):
yield num
num += 1
def _a ( _SCREAMING_SNAKE_CASE = 2_000_000 ) -> int:
return sum(takewhile(lambda _SCREAMING_SNAKE_CASE : x < n , prime_generator() ) )
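# Note: the takewhile predicate keeps primes strictly below n, so the sum excludes n
# itself even when n is prime.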
if __name__ == "__main__":
print(f"""{solution() = }""")
| 347 |
"""simple docstring"""
from __future__ import annotations
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> bool:
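    # 1) Construct the failure array (longest proper prefix that is also a suffix)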
snake_case_ = get_failure_array(_SCREAMING_SNAKE_CASE )
# 2) Step through text searching for pattern
snake_case_ , snake_case_ = 0, 0 # index into text, pattern
while i < len(_SCREAMING_SNAKE_CASE ):
if pattern[j] == text[i]:
if j == (len(_SCREAMING_SNAKE_CASE ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
snake_case_ = failure[j - 1]
continue
i += 1
return False
def _a ( _SCREAMING_SNAKE_CASE ) -> list[int]:
snake_case_ = [0]
snake_case_ = 0
snake_case_ = 1
while j < len(_SCREAMING_SNAKE_CASE ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
snake_case_ = failure[i - 1]
continue
j += 1
failure.append(_SCREAMING_SNAKE_CASE )
return failure
if __name__ == "__main__":
# Test 1)
__SCREAMING_SNAKE_CASE : Optional[int] = 'abc1abc12'
__SCREAMING_SNAKE_CASE : Optional[int] = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
__SCREAMING_SNAKE_CASE : List[str] = 'alskfjaldsk23adsfabcabc'
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
__SCREAMING_SNAKE_CASE : int = 'ABABX'
__SCREAMING_SNAKE_CASE : Optional[Any] = 'ABABZABABYABABX'
assert kmp(pattern, text)
# Test 3)
__SCREAMING_SNAKE_CASE : Any = 'AAAB'
__SCREAMING_SNAKE_CASE : List[Any] = 'ABAAAAAB'
assert kmp(pattern, text)
# Test 4)
__SCREAMING_SNAKE_CASE : Optional[int] = 'abcdabcy'
__SCREAMING_SNAKE_CASE : str = 'abcxabcdabxabcdabcdabcy'
assert kmp(pattern, text)
# Test 5)
__SCREAMING_SNAKE_CASE : Any = 'aabaabaaa'
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 347 | 1 |
"""simple docstring"""
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
__SCREAMING_SNAKE_CASE : List[str] = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
__SCREAMING_SNAKE_CASE : Union[str, Any] = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metric is a wrapper around the Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
__SCREAMING_SNAKE_CASE : str = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __A (datasets.Metric):
'''simple docstring'''
def lowerCAmelCase ( self : int ) ->List[Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/google-research/google-research/tree/master/rouge"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/ROUGE_(metric)""",
"""https://github.com/google-research/google-research/tree/master/rouge""",
] , )
def lowerCAmelCase ( self : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Union[str, Any]=False ) ->Any:
"""simple docstring"""
if rouge_types is None:
snake_case_ = ["""rouge1""", """rouge2""", """rougeL""", """rougeLsum"""]
snake_case_ = rouge_scorer.RougeScorer(rouge_types=UpperCAmelCase_ , use_stemmer=UpperCAmelCase_ )
if use_aggregator:
snake_case_ = scoring.BootstrapAggregator()
else:
snake_case_ = []
for ref, pred in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = scorer.score(UpperCAmelCase_ , UpperCAmelCase_ )
if use_aggregator:
aggregator.add_scores(UpperCAmelCase_ )
else:
scores.append(UpperCAmelCase_ )
if use_aggregator:
snake_case_ = aggregator.aggregate()
else:
snake_case_ = {}
for key in scores[0]:
snake_case_ = [score[key] for score in scores]
return result
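# With use_aggregator=True the BootstrapAggregator yields an AggregateScore
# (low/mid/high) per rouge type; otherwise each rouge type maps to a list of
# per-example Score tuples.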
| 347 |
"""simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __A (snake_case__):
'''simple docstring'''
@slow
@require_torch
def lowerCAmelCase ( self : Union[str, Any] ) ->Dict:
"""simple docstring"""
snake_case_ = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
snake_case_ = BertTokenizer.from_pretrained("""bert-base-uncased""" )
snake_case_ = bertabert.config.encoder.vocab_size
snake_case_ = tokenizer.sep_token_id
snake_case_ = tokenizer.cls_token_id
snake_case_ = 128
snake_case_ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
snake_case_ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
snake_case_ = train_dataset.select(range(32 ) )
snake_case_ = val_dataset.select(range(16 ) )
snake_case_ = 4
def _map_to_encoder_decoder_inputs(UpperCAmelCase_ : int ):
            # Tokenizer will automatically add [CLS] <text> [SEP] for BERT checkpoints
snake_case_ = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=UpperCAmelCase_ , max_length=512 )
snake_case_ = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=UpperCAmelCase_ , max_length=128 )
snake_case_ = inputs.input_ids
snake_case_ = inputs.attention_mask
snake_case_ = outputs.input_ids
snake_case_ = outputs.input_ids.copy()
snake_case_ = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
snake_case_ = outputs.attention_mask
assert all(len(UpperCAmelCase_ ) == 512 for x in inputs.input_ids )
assert all(len(UpperCAmelCase_ ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(UpperCAmelCase_ : Union[str, Any] ):
snake_case_ = pred.label_ids
snake_case_ = pred.predictions
# all unnecessary tokens are removed
snake_case_ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
snake_case_ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
snake_case_ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCAmelCase_ ) )] ) / len(UpperCAmelCase_ )
return {"accuracy": accuracy}
# map train dataset
snake_case_ = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , remove_columns=["""article""", """highlights"""] , )
train_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
# same for validation dataset
snake_case_ = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , remove_columns=["""article""", """highlights"""] , )
val_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
snake_case_ = self.get_auto_remove_tmp_dir()
snake_case_ = SeqaSeqTrainingArguments(
output_dir=UpperCAmelCase_ , per_device_train_batch_size=UpperCAmelCase_ , per_device_eval_batch_size=UpperCAmelCase_ , predict_with_generate=UpperCAmelCase_ , evaluation_strategy="""steps""" , do_train=UpperCAmelCase_ , do_eval=UpperCAmelCase_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
snake_case_ = SeqaSeqTrainer(
model=UpperCAmelCase_ , args=UpperCAmelCase_ , compute_metrics=_compute_metrics , train_dataset=UpperCAmelCase_ , eval_dataset=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , )
# start training
trainer.train()
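# The accuracy above is per-example exact match between the decoded prediction and
# the decoded label; it only smoke-tests that generate() is wired into evaluation.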
| 347 | 1 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : str = OrderedDict(
[
('align', 'EfficientNetImageProcessor'),
('beit', 'BeitImageProcessor'),
('bit', 'BitImageProcessor'),
('blip', 'BlipImageProcessor'),
('blip-2', 'BlipImageProcessor'),
('bridgetower', 'BridgeTowerImageProcessor'),
('chinese_clip', 'ChineseCLIPImageProcessor'),
('clip', 'CLIPImageProcessor'),
('clipseg', 'ViTImageProcessor'),
('conditional_detr', 'ConditionalDetrImageProcessor'),
('convnext', 'ConvNextImageProcessor'),
('convnextv2', 'ConvNextImageProcessor'),
('cvt', 'ConvNextImageProcessor'),
('data2vec-vision', 'BeitImageProcessor'),
('deformable_detr', 'DeformableDetrImageProcessor'),
('deit', 'DeiTImageProcessor'),
('deta', 'DetaImageProcessor'),
('detr', 'DetrImageProcessor'),
('dinat', 'ViTImageProcessor'),
('donut-swin', 'DonutImageProcessor'),
('dpt', 'DPTImageProcessor'),
('efficientformer', 'EfficientFormerImageProcessor'),
('efficientnet', 'EfficientNetImageProcessor'),
('flava', 'FlavaImageProcessor'),
('focalnet', 'BitImageProcessor'),
('git', 'CLIPImageProcessor'),
('glpn', 'GLPNImageProcessor'),
('groupvit', 'CLIPImageProcessor'),
('imagegpt', 'ImageGPTImageProcessor'),
('instructblip', 'BlipImageProcessor'),
('layoutlmv2', 'LayoutLMv2ImageProcessor'),
('layoutlmv3', 'LayoutLMv3ImageProcessor'),
('levit', 'LevitImageProcessor'),
('mask2former', 'Mask2FormerImageProcessor'),
('maskformer', 'MaskFormerImageProcessor'),
('mgp-str', 'ViTImageProcessor'),
('mobilenet_v1', 'MobileNetV1ImageProcessor'),
('mobilenet_v2', 'MobileNetV2ImageProcessor'),
        ('mobilevit', 'MobileViTImageProcessor'),
('mobilevitv2', 'MobileViTImageProcessor'),
('nat', 'ViTImageProcessor'),
('oneformer', 'OneFormerImageProcessor'),
('owlvit', 'OwlViTImageProcessor'),
('perceiver', 'PerceiverImageProcessor'),
('pix2struct', 'Pix2StructImageProcessor'),
('poolformer', 'PoolFormerImageProcessor'),
('regnet', 'ConvNextImageProcessor'),
('resnet', 'ConvNextImageProcessor'),
('sam', 'SamImageProcessor'),
('segformer', 'SegformerImageProcessor'),
('swiftformer', 'ViTImageProcessor'),
('swin', 'ViTImageProcessor'),
('swin2sr', 'Swin2SRImageProcessor'),
('swinv2', 'ViTImageProcessor'),
('table-transformer', 'DetrImageProcessor'),
('timesformer', 'VideoMAEImageProcessor'),
('tvlt', 'TvltImageProcessor'),
('upernet', 'SegformerImageProcessor'),
('van', 'ConvNextImageProcessor'),
('videomae', 'VideoMAEImageProcessor'),
('vilt', 'ViltImageProcessor'),
('vit', 'ViTImageProcessor'),
('vit_hybrid', 'ViTHybridImageProcessor'),
('vit_mae', 'ViTImageProcessor'),
('vit_msn', 'ViTImageProcessor'),
('xclip', 'CLIPImageProcessor'),
('yolos', 'YolosImageProcessor'),
]
)
__SCREAMING_SNAKE_CASE : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[int]:
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
snake_case_ = model_type_to_module_name(_SCREAMING_SNAKE_CASE )
snake_case_ = importlib.import_module(f""".{module_name}""" , """transformers.models""" )
try:
return getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(_SCREAMING_SNAKE_CASE , """__name__""" , _SCREAMING_SNAKE_CASE ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
snake_case_ = importlib.import_module("""transformers""" )
if hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return None
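# Lookup order above: the static IMAGE_PROCESSOR_MAPPING_NAMES table first, then any
# dynamically registered entries, then the top-level transformers namespace (which
# returns a dummy object with a helpful error when a backend is missing).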
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , **_SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
snake_case_ = get_file_from_repo(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , resume_download=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , use_auth_token=_SCREAMING_SNAKE_CASE , revision=_SCREAMING_SNAKE_CASE , local_files_only=_SCREAMING_SNAKE_CASE , )
if resolved_config_file is None:
logger.info(
"""Could not locate the image processor configuration file, will try to use the model config instead.""" )
return {}
with open(_SCREAMING_SNAKE_CASE , encoding="""utf-8""" ) as reader:
return json.load(_SCREAMING_SNAKE_CASE )
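# A minimal usage sketch for the helper above (exported in transformers as
# `get_image_processor_config`; the `_a` name is an artifact of this dump). The
# repo id below is illustrative:
#
#   config_dict = get_image_processor_config("google/vit-base-patch16-224")
#   # -> the parsed image processor config, or {} when no config file is found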
class __A :
'''simple docstring'''
def __init__( self : Tuple ) ->Optional[Any]:
"""simple docstring"""
raise EnvironmentError(
"""AutoImageProcessor is designed to be instantiated """
"""using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.""" )
@classmethod
@replace_list_option_in_docstrings(UpperCAmelCase_ )
def lowerCAmelCase ( cls : Optional[int] , UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[Any] ) ->List[str]:
"""simple docstring"""
snake_case_ = kwargs.pop("""config""" , UpperCAmelCase_ )
snake_case_ = kwargs.pop("""trust_remote_code""" , UpperCAmelCase_ )
snake_case_ = True
snake_case_ , snake_case_ = ImageProcessingMixin.get_image_processor_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
snake_case_ = config_dict.get("""image_processor_type""" , UpperCAmelCase_ )
snake_case_ = None
if "AutoImageProcessor" in config_dict.get("""auto_map""" , {} ):
snake_case_ = config_dict["""auto_map"""]["""AutoImageProcessor"""]
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
snake_case_ = config_dict.pop("""feature_extractor_type""" , UpperCAmelCase_ )
if feature_extractor_class is not None:
logger.warning(
"""Could not find image processor class in the image processor config or the model config. Loading"""
""" based on pattern matching with the model's feature extractor configuration.""" )
snake_case_ = feature_extractor_class.replace("""FeatureExtractor""" , """ImageProcessor""" )
if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ):
snake_case_ = config_dict["""auto_map"""]["""AutoFeatureExtractor"""]
snake_case_ = feature_extractor_auto_map.replace("""FeatureExtractor""" , """ImageProcessor""" )
logger.warning(
"""Could not find image processor auto map in the image processor config or the model config."""
""" Loading based on pattern matching with the model's feature extractor configuration.""" )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = AutoConfig.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ )
            # It could be in `config.image_processor_type`
snake_case_ = getattr(UpperCAmelCase_ , """image_processor_type""" , UpperCAmelCase_ )
if hasattr(UpperCAmelCase_ , """auto_map""" ) and "AutoImageProcessor" in config.auto_map:
snake_case_ = config.auto_map["""AutoImageProcessor"""]
if image_processor_class is not None:
snake_case_ = image_processor_class_from_name(UpperCAmelCase_ )
snake_case_ = image_processor_auto_map is not None
snake_case_ = image_processor_class is not None or type(UpperCAmelCase_ ) in IMAGE_PROCESSOR_MAPPING
snake_case_ = resolve_trust_remote_code(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
if has_remote_code and trust_remote_code:
snake_case_ = get_class_from_dynamic_module(
UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ )
snake_case_ = kwargs.pop("""code_revision""" , UpperCAmelCase_ )
if os.path.isdir(UpperCAmelCase_ ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
elif image_processor_class is not None:
return image_processor_class.from_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(UpperCAmelCase_ ) in IMAGE_PROCESSOR_MAPPING:
snake_case_ = IMAGE_PROCESSOR_MAPPING[type(UpperCAmelCase_ )]
return image_processor_class.from_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
raise ValueError(
F"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """
F"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def lowerCAmelCase ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] ) ->Dict:
"""simple docstring"""
IMAGE_PROCESSOR_MAPPING.register(UpperCAmelCase_ , UpperCAmelCase_ )
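# A minimal usage sketch (assumes network access; the repo id is illustrative).
# In transformers this class is exported as `AutoImageProcessor`:
#
#   processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   inputs = processor(images=image, return_tensors="pt")
#
# Custom pairs can be made discoverable through the static hook defined above:
#
#   AutoImageProcessor.register(MyConfig, MyImageProcessor)  # hypothetical classes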
| 347 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
__SCREAMING_SNAKE_CASE : Tuple = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
__SCREAMING_SNAKE_CASE : Tuple = subprocess.check_output(f"""git diff --name-only {fork_point_sha}""".split()).decode('utf-8').split()
__SCREAMING_SNAKE_CASE : Any = '|'.join(sys.argv[1:])
__SCREAMING_SNAKE_CASE : Optional[Any] = re.compile(Rf"""^({joined_dirs}).*?\.py$""")
__SCREAMING_SNAKE_CASE : List[str] = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
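# Illustration of the filter above (assumed inputs): with sys.argv[1:] == ["utils", "src"],
# the regex matches "src/transformers/foo.py" and "utils/check_repo.py" but skips
# "docs/index.md" (wrong top-level dir) and "src/README.md" (not a .py file).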
| 347 | 1 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __A :
'''simple docstring'''
def __init__( self : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str=13 , UpperCAmelCase_ : int=30 , UpperCAmelCase_ : Optional[Any]=2 , UpperCAmelCase_ : Union[str, Any]=3 , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Optional[int]=32 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : Optional[Any]=4 , UpperCAmelCase_ : Dict=37 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : int=10 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : Any=3 , UpperCAmelCase_ : Optional[int]=0.6 , UpperCAmelCase_ : Dict=None , ) ->Dict:
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = mask_ratio
snake_case_ = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
snake_case_ = (image_size // patch_size) ** 2
snake_case_ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
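        # Worked example with the defaults above: image_size=30, patch_size=2 and
        # mask_ratio=0.6 give num_patches = (30 // 2) ** 2 = 225, so
        # seq_length = ceil(0.4 * (225 + 1)) = ceil(90.4) = 91 visible tokens.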
def lowerCAmelCase ( self : Dict ) ->str:
"""simple docstring"""
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase ( self : List[Any] ) ->List[Any]:
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict ) ->List[str]:
"""simple docstring"""
snake_case_ = TFViTMAEModel(config=UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ , training=UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any ) ->List[str]:
"""simple docstring"""
snake_case_ = TFViTMAEForPreTraining(UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ , training=UpperCAmelCase_ )
# expected sequence length = num_patches
snake_case_ = (self.image_size // self.patch_size) ** 2
snake_case_ = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
snake_case_ = 1
snake_case_ = TFViTMAEForPreTraining(UpperCAmelCase_ )
snake_case_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ = model(UpperCAmelCase_ , training=UpperCAmelCase_ )
snake_case_ = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCAmelCase ( self : Optional[int] ) ->Any:
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
((snake_case_) , (snake_case_) , (snake_case_)) = config_and_inputs
snake_case_ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class __A (snake_case__ , snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: int = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
__lowercase: Optional[Any] = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {}
__lowercase: List[str] = False
__lowercase: str = False
__lowercase: Dict = False
__lowercase: Tuple = False
def lowerCAmelCase ( self : Union[str, Any] ) ->int:
"""simple docstring"""
snake_case_ = TFViTMAEModelTester(self )
snake_case_ = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37 )
def lowerCAmelCase ( self : Optional[int] ) ->Any:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def lowerCAmelCase ( self : Dict ) ->Optional[Any]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Any ) ->List[Any]:
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(UpperCAmelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
snake_case_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , tf.keras.layers.Layer ) )
def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]:
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(UpperCAmelCase_ )
snake_case_ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] ) ->Optional[int]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowerCAmelCase ( self : str ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase_ )
def lowerCAmelCase ( self : Any ) ->Tuple:
"""simple docstring"""
np.random.seed(2 )
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = int((config.image_size // config.patch_size) ** 2 )
snake_case_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
snake_case_ = model_class(UpperCAmelCase_ )
snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ , noise=UpperCAmelCase_ )
snake_case_ = copy.deepcopy(self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
snake_case_ = model(**UpperCAmelCase_ , noise=UpperCAmelCase_ )
snake_case_ = outputs_dict[0].numpy()
snake_case_ = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def lowerCAmelCase ( self : List[str] ) ->str:
"""simple docstring"""
np.random.seed(2 )
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = int((config.image_size // config.patch_size) ** 2 )
snake_case_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(UpperCAmelCase_ : Optional[int] ):
snake_case_ = {}
for k, v in inputs_dict.items():
if tf.is_tensor(UpperCAmelCase_ ):
snake_case_ = v.numpy()
else:
snake_case_ = np.array(UpperCAmelCase_ )
return inputs_np_dict
for model_class in self.all_model_classes:
snake_case_ = model_class(UpperCAmelCase_ )
snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = prepare_numpy_arrays(UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ , noise=UpperCAmelCase_ )
snake_case_ = model(**UpperCAmelCase_ , noise=UpperCAmelCase_ )
self.assert_outputs_same(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict ) ->Dict:
"""simple docstring"""
np.random.seed(2 )
snake_case_ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
snake_case_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
snake_case_ = tf.constant(UpperCAmelCase_ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
snake_case_ = tf_noise
super().check_pt_tf_models(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : Tuple ) ->Any:
"""simple docstring"""
np.random.seed(2 )
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(UpperCAmelCase_ )
if module_member_name.endswith("""MainLayer""" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )]
for module_member in (getattr(UpperCAmelCase_ , UpperCAmelCase_ ),)
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(UpperCAmelCase_ , """_keras_serializable""" , UpperCAmelCase_ )
}
snake_case_ = int((config.image_size // config.patch_size) ** 2 )
snake_case_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
snake_case_ = tf.convert_to_tensor(UpperCAmelCase_ )
inputs_dict.update({"""noise""": noise} )
for main_layer_class in tf_main_layer_classes:
snake_case_ = main_layer_class(UpperCAmelCase_ )
snake_case_ = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
snake_case_ = tf.keras.Model(UpperCAmelCase_ , outputs=main_layer(UpperCAmelCase_ ) )
snake_case_ = model(UpperCAmelCase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ = os.path.join(UpperCAmelCase_ , """keras_model.h5""" )
model.save(UpperCAmelCase_ )
snake_case_ = tf.keras.models.load_model(
UpperCAmelCase_ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(UpperCAmelCase_ , tf.keras.Model )
snake_case_ = model(UpperCAmelCase_ )
self.assert_outputs_same(UpperCAmelCase_ , UpperCAmelCase_ )
@slow
def lowerCAmelCase ( self : int ) ->Tuple:
"""simple docstring"""
np.random.seed(2 )
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = int((config.image_size // config.patch_size) ** 2 )
snake_case_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
snake_case_ = model_class(UpperCAmelCase_ )
snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ , noise=UpperCAmelCase_ )
if model_class.__name__ == "TFViTMAEModel":
snake_case_ = outputs.last_hidden_state.numpy()
snake_case_ = 0
else:
snake_case_ = outputs.logits.numpy()
snake_case_ = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase_ , saved_model=UpperCAmelCase_ )
snake_case_ = model_class.from_pretrained(UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ , noise=UpperCAmelCase_ )
if model_class.__name__ == "TFViTMAEModel":
snake_case_ = after_outputs["""last_hidden_state"""].numpy()
snake_case_ = 0
else:
snake_case_ = after_outputs["""logits"""].numpy()
snake_case_ = 0
snake_case_ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(UpperCAmelCase_ , 1E-5 )
def lowerCAmelCase ( self : List[Any] ) ->Optional[int]:
"""simple docstring"""
np.random.seed(2 )
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = int((config.image_size // config.patch_size) ** 2 )
snake_case_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
snake_case_ = model_class(UpperCAmelCase_ )
snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ , noise=UpperCAmelCase_ )
snake_case_ = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(UpperCAmelCase_ )
snake_case_ = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
snake_case_ = model_class.from_config(model.config )
snake_case_ = new_model(UpperCAmelCase_ ) # Build model
new_model.set_weights(model.get_weights() )
snake_case_ = new_model(UpperCAmelCase_ , noise=UpperCAmelCase_ )
self.assert_outputs_same(UpperCAmelCase_ , UpperCAmelCase_ )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def lowerCAmelCase ( self : Optional[Any] ) ->int:
"""simple docstring"""
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def lowerCAmelCase ( self : str ) ->Tuple:
"""simple docstring"""
pass
@slow
def lowerCAmelCase ( self : int ) ->int:
"""simple docstring"""
snake_case_ = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(UpperCAmelCase_ )
def _a ( ) -> Any:
snake_case_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __A (unittest.TestCase):
'''simple docstring'''
@cached_property
def lowerCAmelCase ( self : str ) ->Any:
"""simple docstring"""
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def lowerCAmelCase ( self : Union[str, Any] ) ->Any:
"""simple docstring"""
np.random.seed(2 )
snake_case_ = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(images=UpperCAmelCase_ , return_tensors="""tf""" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
snake_case_ = ViTMAEConfig()
snake_case_ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
snake_case_ = np.random.uniform(size=(1, num_patches) )
# forward pass
snake_case_ = model(**UpperCAmelCase_ , noise=UpperCAmelCase_ )
# verify the logits
snake_case_ = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , UpperCAmelCase_ )
snake_case_ = tf.convert_to_tensor(
[[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , UpperCAmelCase_ , atol=1E-4 )
| 347 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Tuple = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
__SCREAMING_SNAKE_CASE : List[Any] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]:
snake_case_ = {}
with open(_SCREAMING_SNAKE_CASE , """r""" ) as file:
for line_number, line in enumerate(_SCREAMING_SNAKE_CASE ):
snake_case_ = line.strip()
if line:
snake_case_ = line.split()
snake_case_ = line_number
snake_case_ = words[0]
snake_case_ = value
return result
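# Illustration (assumed input): a label file with one class name per line, e.g.
#   neutral
#   happy
# yields {0: "neutral", 1: "happy"}, i.e. an id-to-label mapping keyed by line number.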
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
for attribute in key.split(""".""" ):
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_SCREAMING_SNAKE_CASE ):
snake_case_ = PARAM_MAPPING[full_name.split(""".""" )[-1]]
snake_case_ = """param"""
if weight_type is not None and weight_type != "param":
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape
elif weight_type is not None and weight_type == "param":
snake_case_ = hf_pointer
for attribute in hf_param_name.split(""".""" ):
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = shape_pointer.shape
# let's reduce dimension
snake_case_ = value[0]
else:
snake_case_ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
snake_case_ = value
elif weight_type == "weight_g":
snake_case_ = value
elif weight_type == "weight_v":
snake_case_ = value
elif weight_type == "bias":
snake_case_ = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = value
else:
snake_case_ = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
snake_case_ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_SCREAMING_SNAKE_CASE ):
snake_case_ = PARAM_MAPPING[full_name.split(""".""" )[-1]]
snake_case_ = """param"""
if weight_type is not None and weight_type != "param":
snake_case_ = """.""".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
snake_case_ = """.""".join([key, hf_param_name] )
else:
snake_case_ = key
snake_case_ = value if """lm_head""" in full_key else value[0]
__SCREAMING_SNAKE_CASE : int = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> List[str]:
snake_case_ = False
for key, mapped_key in MAPPING.items():
snake_case_ = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
snake_case_ = True
if "*" in mapped_key:
snake_case_ = name.split(_SCREAMING_SNAKE_CASE )[0].split(""".""" )[-2]
snake_case_ = mapped_key.replace("""*""" , _SCREAMING_SNAKE_CASE )
if "weight_g" in name:
snake_case_ = """weight_g"""
elif "weight_v" in name:
snake_case_ = """weight_v"""
elif "bias" in name:
snake_case_ = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case_ = """weight"""
else:
snake_case_ = None
if hf_dict is not None:
rename_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return is_used
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
snake_case_ = []
snake_case_ = fairseq_model.state_dict()
snake_case_ = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
snake_case_ = False
if "conv_layers" in name:
load_conv_layer(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == """group""" , )
snake_case_ = True
else:
snake_case_ = load_wavaveca_layer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if not is_used:
unused_weights.append(_SCREAMING_SNAKE_CASE )
logger.warning(f"""Unused weights: {unused_weights}""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
snake_case_ = full_name.split("""conv_layers.""" )[-1]
snake_case_ = name.split(""".""" )
snake_case_ = int(items[0] )
snake_case_ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
snake_case_ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
snake_case_ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
snake_case_ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
snake_case_ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_SCREAMING_SNAKE_CASE )
@torch.no_grad()
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False ) -> int:
if config_path is not None:
snake_case_ = WavaVecaConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
else:
snake_case_ = WavaVecaConfig()
if is_seq_class:
snake_case_ = read_txt_into_dict(_SCREAMING_SNAKE_CASE )
snake_case_ = idalabel
snake_case_ = WavaVecaForSequenceClassification(_SCREAMING_SNAKE_CASE )
snake_case_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , )
feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE )
elif is_finetuned:
if dict_path:
snake_case_ = Dictionary.load(_SCREAMING_SNAKE_CASE )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
snake_case_ = target_dict.pad_index
snake_case_ = target_dict.bos_index
snake_case_ = target_dict.eos_index
snake_case_ = len(target_dict.symbols )
snake_case_ = os.path.join(_SCREAMING_SNAKE_CASE , """vocab.json""" )
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_SCREAMING_SNAKE_CASE ) )
return
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
snake_case_ = target_dict.indices
# fairseq has the <pad> and <s> switched
snake_case_ = 0
snake_case_ = 1
with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = WavaVecaCTCTokenizer(
_SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_SCREAMING_SNAKE_CASE , )
snake_case_ = True if config.feat_extract_norm == """layer""" else False
snake_case_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , )
snake_case_ = WavaVecaProcessor(feature_extractor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
processor.save_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ = WavaVecaForCTC(_SCREAMING_SNAKE_CASE )
else:
snake_case_ = WavaVecaForPreTraining(_SCREAMING_SNAKE_CASE )
if is_finetuned or is_seq_class:
snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
snake_case_ = argparse.Namespace(task="""audio_pretraining""" )
snake_case_ = fairseq.tasks.setup_task(_SCREAMING_SNAKE_CASE )
snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_SCREAMING_SNAKE_CASE )
snake_case_ = model[0].eval()
recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , not is_finetuned )
hf_wavavec.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
__SCREAMING_SNAKE_CASE : Any = parser.parse_args()
__SCREAMING_SNAKE_CASE : List[Any] = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
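# Example invocation (paths are placeholders; the script name is assumed from the
# transformers conversion utility this file mirrors):
#
#   python convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base \
#       --not_finetuned
#
# Drop --not_finetuned and pass --dict_path to convert a fine-tuned CTC checkpoint.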
| 347 | 1 |
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class __A :
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any]=sys.maxsize ) ->List[Any]:
"""simple docstring"""
snake_case_ = """bilinear"""
snake_case_ = max_size
snake_case_ = short_edge_length
def __call__( self : List[Any] , UpperCAmelCase_ : Dict ) ->int:
"""simple docstring"""
snake_case_ = []
for img in imgs:
snake_case_ , snake_case_ = img.shape[:2]
# later: provide list and randomly choose index for resize
snake_case_ = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
snake_case_ = size * 1.0 / min(UpperCAmelCase_ , UpperCAmelCase_ )
if h < w:
snake_case_ , snake_case_ = size, scale * w
else:
snake_case_ , snake_case_ = scale * h, size
if max(UpperCAmelCase_ , UpperCAmelCase_ ) > self.max_size:
snake_case_ = self.max_size * 1.0 / max(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = newh * scale
snake_case_ = neww * scale
snake_case_ = int(neww + 0.5 )
snake_case_ = int(newh + 0.5 )
if img.dtype == np.uinta:
snake_case_ = Image.fromarray(UpperCAmelCase_ )
snake_case_ = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
snake_case_ = np.asarray(UpperCAmelCase_ )
else:
                snake_case_ = img.permute(2 , 0 , 1 ).unsqueeze(0 )  # hwc -> nchw
snake_case_ = nn.functional.interpolate(
UpperCAmelCase_ , (newh, neww) , mode=self.interp_method , align_corners=UpperCAmelCase_ ).squeeze(0 )
img_augs.append(UpperCAmelCase_ )
return img_augs
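# Worked example of the scaling above (assumed inputs): for an h=480, w=640 image
# and a sampled size=512, scale = 512 / 480 ≈ 1.067 and (newh, neww) = (512, 683)
# after rounding. If max(newh, neww) exceeded self.max_size, both sides would be
# rescaled by max_size / max(newh, neww) to keep the aspect ratio.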
class __A :
'''simple docstring'''
def __init__( self : Dict , UpperCAmelCase_ : Optional[int] ) ->Any:
"""simple docstring"""
snake_case_ = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
snake_case_ = cfg.INPUT.FORMAT
snake_case_ = cfg.SIZE_DIVISIBILITY
snake_case_ = cfg.PAD_VALUE
snake_case_ = cfg.INPUT.MAX_SIZE_TEST
snake_case_ = cfg.MODEL.DEVICE
snake_case_ = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
snake_case_ = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
snake_case_ = lambda UpperCAmelCase_ : (x - self.pixel_mean) / self.pixel_std
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any] ) ->Optional[int]:
"""simple docstring"""
snake_case_ = tuple(max(UpperCAmelCase_ ) for s in zip(*[img.shape for img in images] ) )
snake_case_ = [im.shape[-2:] for im in images]
snake_case_ = [
nn.functional.pad(
UpperCAmelCase_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(UpperCAmelCase_ , UpperCAmelCase_ )
]
return torch.stack(UpperCAmelCase_ ), torch.tensor(UpperCAmelCase_ )
def __call__( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[Any]=False ) ->Dict:
"""simple docstring"""
with torch.no_grad():
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = [images]
if single_image:
assert len(UpperCAmelCase_ ) == 1
for i in range(len(UpperCAmelCase_ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(UpperCAmelCase_ , images.pop(UpperCAmelCase_ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
UpperCAmelCase_ , torch.as_tensor(img_tensorize(images.pop(UpperCAmelCase_ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
snake_case_ = torch.tensor([im.shape[:2] for im in images] )
snake_case_ = self.aug(UpperCAmelCase_ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
snake_case_ = [self.normalizer(UpperCAmelCase_ ) for x in images]
            # pad to a common (h, w) so the batch can be stacked
snake_case_ , snake_case_ = self.pad(UpperCAmelCase_ )
            # size-divisibility padding is not implemented yet
if self.size_divisibility > 0:
raise NotImplementedError()
            # (y, x) ratios of the original sizes to the padded sizes, used to rescale boxes later
snake_case_ = torch.true_divide(UpperCAmelCase_ , UpperCAmelCase_ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
assert torch.isfinite(_SCREAMING_SNAKE_CASE ).all(), "Box tensor contains infinite or NaN!"
snake_case_ , snake_case_ = box_size
tensor[:, 0].clamp_(min=0 , max=_SCREAMING_SNAKE_CASE )
tensor[:, 1].clamp_(min=0 , max=_SCREAMING_SNAKE_CASE )
tensor[:, 2].clamp_(min=0 , max=_SCREAMING_SNAKE_CASE )
tensor[:, 3].clamp_(min=0 , max=_SCREAMING_SNAKE_CASE )
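# Illustration (boxes assumed in xyxy order): with scale_yx = [[0.5, 0.25]], the
# scaler above multiplies x coordinates (columns 0 and 2) by 0.25 and y
# coordinates (columns 1 and 3) by 0.5; the clipper then clamps every coordinate
# into the valid (height, width) range so no box extends past the image border.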
| 347 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class __A :
'''simple docstring'''
def __init__( self : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any=14 , UpperCAmelCase_ : Union[str, Any]=7 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : str=99 , UpperCAmelCase_ : Union[str, Any]=32 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : str=37 , UpperCAmelCase_ : Any="gelu" , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : int=512 , UpperCAmelCase_ : Tuple=0.02 , ) ->List[str]:
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = rotary_dim
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = initializer_range
snake_case_ = None
snake_case_ = vocab_size - 1
snake_case_ = vocab_size - 1
snake_case_ = vocab_size - 1
def lowerCAmelCase ( self : int ) ->Optional[int]:
"""simple docstring"""
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_input_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=UpperCAmelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def lowerCAmelCase ( self : Dict ) ->Tuple:
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def lowerCAmelCase ( self : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict ) ->Tuple:
"""simple docstring"""
snake_case_ = 20
snake_case_ = model_class_name(UpperCAmelCase_ )
snake_case_ = model.init_cache(input_ids.shape[0] , UpperCAmelCase_ )
snake_case_ = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
snake_case_ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
snake_case_ = model(
input_ids[:, :-1] , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , )
snake_case_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
snake_case_ = model(
input_ids[:, -1:] , attention_mask=UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=UpperCAmelCase_ , )
snake_case_ = model(UpperCAmelCase_ )
snake_case_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) ->Dict:
"""simple docstring"""
snake_case_ = 20
snake_case_ = model_class_name(UpperCAmelCase_ )
snake_case_ = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
snake_case_ = model.init_cache(input_ids.shape[0] , UpperCAmelCase_ )
snake_case_ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
snake_case_ = model(
input_ids[:, :-1] , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , )
snake_case_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
snake_case_ = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , )
snake_case_ = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )
snake_case_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
@require_flax
class __A (snake_case__ , snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Any = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
__lowercase: List[str] = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def lowerCAmelCase ( self : Tuple ) ->List[str]:
"""simple docstring"""
snake_case_ = FlaxGPTJModelTester(self )
def lowerCAmelCase ( self : int ) ->List[Any]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : List[str] ) ->Any:
"""simple docstring"""
for model_class_name in self.all_model_classes:
snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
@tooslow
def lowerCAmelCase ( self : List[str] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" )
snake_case_ = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ )
snake_case_ = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" )
snake_case_ = False
snake_case_ = model.config.eos_token_id
snake_case_ = jax.jit(model.generate )
snake_case_ = jit_generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences
snake_case_ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
snake_case_ = [
"""Hello this is a long string of text.\n\nI'm trying to get the text of the""",
"""Hey, I'm a little late to the party. I'm going to""",
]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
@is_pt_flax_cross_test
def lowerCAmelCase ( self : int ) ->str:
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
snake_case_ = model_class.__name__[4:] # Skip the "Flax" at the beginning
snake_case_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ , snake_case_ = pt_inputs["""input_ids"""].shape
snake_case_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCAmelCase_ ):
snake_case_ = 0
snake_case_ = 1
snake_case_ = 0
snake_case_ = 1
snake_case_ = pt_model_class(UpperCAmelCase_ ).eval()
snake_case_ = model_class(UpperCAmelCase_ , dtype=jnp.floataa )
snake_case_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCAmelCase_ )
snake_case_ = fx_state
with torch.no_grad():
snake_case_ = pt_model(**UpperCAmelCase_ ).to_tuple()
snake_case_ = fx_model(**UpperCAmelCase_ ).to_tuple()
self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(UpperCAmelCase_ )
snake_case_ = model_class.from_pretrained(UpperCAmelCase_ , from_pt=UpperCAmelCase_ )
snake_case_ = fx_model_loaded(**UpperCAmelCase_ ).to_tuple()
self.assertEqual(
len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def lowerCAmelCase ( self : List[Any] ) ->str:
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
snake_case_ = model_class.__name__[4:] # Skip the "Flax" at the beginning
snake_case_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = pt_model_class(UpperCAmelCase_ ).eval()
snake_case_ = model_class(UpperCAmelCase_ , dtype=jnp.floataa )
snake_case_ = load_flax_weights_in_pytorch_model(UpperCAmelCase_ , fx_model.params )
snake_case_ , snake_case_ = pt_inputs["""input_ids"""].shape
snake_case_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCAmelCase_ ):
snake_case_ = 0
snake_case_ = 1
snake_case_ = 0
snake_case_ = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
snake_case_ = pt_model(**UpperCAmelCase_ ).to_tuple()
snake_case_ = fx_model(**UpperCAmelCase_ ).to_tuple()
self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(UpperCAmelCase_ )
snake_case_ = pt_model_class.from_pretrained(UpperCAmelCase_ , from_flax=UpperCAmelCase_ )
with torch.no_grad():
snake_case_ = pt_model_loaded(**UpperCAmelCase_ ).to_tuple()
self.assertEqual(
len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def lowerCAmelCase ( self : List[Any] ) ->str:
"""simple docstring"""
for model_class_name in self.all_model_classes:
snake_case_ = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" )
snake_case_ = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase_ )
| 347 | 1 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : List[Any] = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __A (snake_case__):
'''simple docstring'''
__lowercase: Union[str, Any] = """segformer"""
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Optional[int]=3 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : int=[2, 2, 2, 2] , UpperCAmelCase_ : str=[8, 4, 2, 1] , UpperCAmelCase_ : Optional[Any]=[32, 64, 160, 256] , UpperCAmelCase_ : Optional[Any]=[7, 3, 3, 3] , UpperCAmelCase_ : Union[str, Any]=[4, 2, 2, 2] , UpperCAmelCase_ : List[str]=[1, 2, 5, 8] , UpperCAmelCase_ : List[str]=[4, 4, 4, 4] , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : Dict=0.0 , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Dict=1E-6 , UpperCAmelCase_ : Tuple=256 , UpperCAmelCase_ : int=255 , **UpperCAmelCase_ : List[Any] , ) ->Any:
"""simple docstring"""
super().__init__(**UpperCAmelCase_ )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , UpperCAmelCase_ , )
snake_case_ = num_channels
snake_case_ = num_encoder_blocks
snake_case_ = depths
snake_case_ = sr_ratios
snake_case_ = hidden_sizes
snake_case_ = patch_sizes
snake_case_ = strides
snake_case_ = mlp_ratios
snake_case_ = num_attention_heads
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = classifier_dropout_prob
snake_case_ = initializer_range
snake_case_ = drop_path_rate
snake_case_ = layer_norm_eps
snake_case_ = decoder_hidden_size
snake_case_ = kwargs.get("""reshape_last_stage""" , UpperCAmelCase_ )
snake_case_ = semantic_loss_ignore_index
class __A (snake_case__):
'''simple docstring'''
__lowercase: Tuple = version.parse("""1.11""")
@property
def lowerCAmelCase ( self : Dict ) ->Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase ( self : List[str] ) ->float:
"""simple docstring"""
return 1E-4
@property
def lowerCAmelCase ( self : Optional[Any] ) ->int:
"""simple docstring"""
return 12
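# A short sketch of what the ONNX export config above declares (the class is
# exported in transformers as SegformerOnnxConfig; names here are obfuscated):
# a single dynamic `pixel_values` input over batch/channels/height/width, a
# validation tolerance of 1e-4, and a default ONNX opset of 12.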
| 347 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
__SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
class __A (snake_case__):
'''simple docstring'''
__lowercase: int = """upernet"""
def __init__( self : str , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : str=512 , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : Optional[Any]=[1, 2, 3, 6] , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Tuple=0.4 , UpperCAmelCase_ : Tuple=384 , UpperCAmelCase_ : Union[str, Any]=256 , UpperCAmelCase_ : str=1 , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : Tuple=255 , **UpperCAmelCase_ : Dict , ) ->Union[str, Any]:
"""simple docstring"""
super().__init__(**UpperCAmelCase_ )
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
snake_case_ = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = backbone_config.get("""model_type""" )
snake_case_ = CONFIG_MAPPING[backbone_model_type]
snake_case_ = config_class.from_dict(UpperCAmelCase_ )
snake_case_ = backbone_config
snake_case_ = hidden_size
snake_case_ = initializer_range
snake_case_ = pool_scales
snake_case_ = use_auxiliary_head
snake_case_ = auxiliary_loss_weight
snake_case_ = auxiliary_in_channels
snake_case_ = auxiliary_channels
snake_case_ = auxiliary_num_convs
snake_case_ = auxiliary_concat_input
snake_case_ = loss_ignore_index
def lowerCAmelCase ( self : str ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = copy.deepcopy(self.__dict__ )
snake_case_ = self.backbone_config.to_dict()
snake_case_ = self.__class__.model_type
return output
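# A minimal usage sketch (the class is exported in transformers as UperNetConfig):
# with backbone_config=None the constructor falls back to a default ResNet
# backbone exposing stage1..stage4, and to_dict() re-serializes that nested
# backbone config:
#
#   config = UperNetConfig()  # default ResNet backbone
#   assert config.to_dict()["backbone_config"]["model_type"] == "resnet"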
| 347 | 1 |
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[Any] = '%20'.join(argv[1:]) if len(argv) > 1 else quote(str(input('Search: ')))
print('Googling.....')
__SCREAMING_SNAKE_CASE : Any = f"""https://www.google.com/search?q={query}&num=100"""
__SCREAMING_SNAKE_CASE : str = requests.get(
url,
headers={'User-Agent': str(UserAgent().random)},
)
try:
__SCREAMING_SNAKE_CASE : List[Any] = (
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'yuRUbf'})
.find('a')
.get('href')
)
except AttributeError:
__SCREAMING_SNAKE_CASE : Optional[Any] = parse_qs(
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'kCrYT'})
.find('a')
.get('href')
)['url'][0]
webbrowser.open(link)
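# Example (assumed filename): python google_search.py "open source licenses"
# Note that the scraped CSS classes ('yuRUbf', 'kCrYT') track Google's current
# result markup and can break without warning; the AttributeError fallback
# handles one known alternate layout.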
| 347 |
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    '''simple docstring'''
    def setUp( self ):
        """simple docstring"""
        self.checkpoint = """ylacombe/bark-small"""
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = """en_speaker_1"""
        self.input_string = """This is a test string"""
        self.speaker_embeddings_dict_path = """speaker_embeddings_path.json"""
        self.speaker_embeddings_directory = """speaker_embeddings"""
    def get_tokenizer( self , **kwargs ):
        """simple docstring"""
        return AutoTokenizer.from_pretrained(self.checkpoint , **kwargs )
    def tearDown( self ):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )
    def test_save_load_pretrained_default( self ):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        processor.save_pretrained(self.tmpdirname )
        processor = BarkProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
    @slow
    def test_save_load_pretrained_additional_features( self ):
        """simple docstring"""
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        processor.save_pretrained(
            self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
    def test_speaker_embeddings( self ):
        """simple docstring"""
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            """semantic_prompt""": np.ones(seq_len ),
            """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
            """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string , voice_preset=voice_preset )
        processed_voice_preset = inputs["""history_prompt"""]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([] ) ).tolist() )
        # test loading voice preset from npz file
        file_path = os.path.join(self.tmpdirname , """file.npz""" )
        np.savez(file_path , **voice_preset )
        inputs = processor(text=self.input_string , voice_preset=file_path )
        processed_voice_preset = inputs["""history_prompt"""]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([] ) ).tolist() )
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string , voice_preset=self.voice_preset )
    def test_tokenizer( self ):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        encoded_processor = processor(text=self.input_string )
        encoded_tok = tokenizer(
            self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=False , return_attention_mask=True , return_token_type_ids=False , )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 347 | 1 |
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __A (unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[int] ) ->Dict:
"""simple docstring"""
snake_case_ = """ylacombe/bark-small"""
snake_case_ = tempfile.mkdtemp()
snake_case_ = """en_speaker_1"""
snake_case_ = """This is a test string"""
snake_case_ = """speaker_embeddings_path.json"""
snake_case_ = """speaker_embeddings"""
def lowerCAmelCase ( self : List[str] , **UpperCAmelCase_ : str ) ->Optional[int]:
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase_ )
def lowerCAmelCase ( self : Any ) ->Dict:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase ( self : List[Any] ) ->Dict:
"""simple docstring"""
snake_case_ = self.get_tokenizer()
snake_case_ = BarkProcessor(tokenizer=UpperCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
snake_case_ = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def lowerCAmelCase ( self : Dict ) ->int:
"""simple docstring"""
snake_case_ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
snake_case_ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
snake_case_ = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def lowerCAmelCase ( self : Optional[Any] ) ->Any:
"""simple docstring"""
snake_case_ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
snake_case_ = 35
snake_case_ = 2
snake_case_ = 8
snake_case_ = {
"""semantic_prompt""": np.ones(UpperCAmelCase_ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
snake_case_ = processor(text=self.input_string , voice_preset=UpperCAmelCase_ )
snake_case_ = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
snake_case_ = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(UpperCAmelCase_ , **UpperCAmelCase_ )
snake_case_ = processor(text=self.input_string , voice_preset=UpperCAmelCase_ )
snake_case_ = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
snake_case_ = processor(text=self.input_string , voice_preset=self.voice_preset )
def lowerCAmelCase ( self : Tuple ) ->Dict:
"""simple docstring"""
snake_case_ = self.get_tokenizer()
snake_case_ = BarkProcessor(tokenizer=UpperCAmelCase_ )
snake_case_ = processor(text=self.input_string )
snake_case_ = tokenizer(
self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 347 |
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field(default=None , metadata=None ):
    return field(default_factory=lambda: default , metadata=metadata )
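# Example: `list_field(default=[1, 2, 3])` routes the mutable default through
# `default_factory`, so every dataclass instance gets its own list.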
@dataclass
class BasicExample:
    '''simple docstring'''
    foo: int
    bar: float
    baz: str
    flag: bool
@dataclass
class WithDefaultExample:
    '''simple docstring'''
    foo: int = 42
    baz: str = field(default="""toto""" , metadata={"""help""": """help message"""})
@dataclass
class WithDefaultBoolExample:
    '''simple docstring'''
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None
class BasicEnum(Enum):
    '''simple docstring'''
    titi = """titi"""
    toto = """toto"""
class MixedTypeEnum(Enum):
    '''simple docstring'''
    titi = """titi"""
    toto = """toto"""
    fourtytwo = 42
@dataclass
class EnumExample:
    '''simple docstring'''
    foo: BasicEnum = "toto"
    def __post_init__( self ):
        """simple docstring"""
        self.foo = BasicEnum(self.foo )
@dataclass
class MixedTypeEnumExample:
    '''simple docstring'''
    foo: MixedTypeEnum = "toto"
    def __post_init__( self ):
        """simple docstring"""
        self.foo = MixedTypeEnum(self.foo )
@dataclass
class OptionalExample:
    '''simple docstring'''
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None , metadata={"""help""": """help message"""})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])
@dataclass
class ListExample:
    '''simple docstring'''
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["""Hallo""", """Bonjour""", """Hello"""])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class RequiredExample:
    '''simple docstring'''
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()
    def __post_init__( self ):
        """simple docstring"""
        self.required_enum = BasicEnum(self.required_enum )
@dataclass
class StringLiteralAnnotationExample:
    '''simple docstring'''
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="""toto""" , metadata={"""help""": """help message"""})
    foo_str: "List[str]" = list_field(default=["""Hallo""", """Bonjour""", """Hello"""])
if is_python_no_less_than_3_10:
    @dataclass
    class WithDefaultBoolExamplePep604:
        '''simple docstring'''
        foo: bool = False
        baz: bool = True
        opt: bool | None = None
    @dataclass
    class OptionalExamplePep604:
        '''simple docstring'''
        foo: int | None = None
        bar: float | None = field(default=None , metadata={"""help""": """help message"""})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
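# Hedged sketch of how HfArgumentParser consumes these dataclasses (using the names restored above):
#
#   parser = HfArgumentParser(BasicExample)
#   (example,) = parser.parse_args_into_dataclasses(
#       ["--foo", "1", "--bar", "0.5", "--baz", "quux", "--flag", "true"]
#   )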
class __A (unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : argparse.ArgumentParser , UpperCAmelCase_ : argparse.ArgumentParser ) ->Optional[int]:
"""simple docstring"""
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
snake_case_ = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k != """container"""}
snake_case_ = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k != """container"""}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("""choices""" , UpperCAmelCase_ ) and yy.get("""choices""" , UpperCAmelCase_ ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["""type"""](UpperCAmelCase_ ) , yy["""type"""](UpperCAmelCase_ ) )
del xx["type"], yy["type"]
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : int ) ->Any:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ )
expected.add_argument("""--bar""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ )
expected.add_argument("""--baz""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ )
expected.add_argument("""--flag""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""]
((snake_case_) , ) = parser.parse_args_into_dataclasses(UpperCAmelCase_ , look_for_args_file=UpperCAmelCase_ )
self.assertFalse(example.flag )
def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=42 , type=UpperCAmelCase_ )
expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase_ , help="""help message""" )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" )
expected.add_argument("""--baz""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("""--no_baz""" , action="""store_false""" , default=UpperCAmelCase_ , dest="""baz""" )
expected.add_argument("""--opt""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ )
snake_case_ = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(UpperCAmelCase_ )
for dataclass_type in dataclass_types:
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = parser.parse_args([] )
self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) )
snake_case_ = parser.parse_args(["""--foo""", """--no_baz"""] )
self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) )
snake_case_ = parser.parse_args(["""--foo""", """--baz"""] )
self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) )
snake_case_ = parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] )
self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) )
snake_case_ = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] )
self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) )
def lowerCAmelCase ( self : int ) ->List[str]:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
snake_case_ = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
snake_case_ = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
snake_case_ = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
snake_case_ = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
snake_case_ = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def lowerCAmelCase ( self : Dict ) ->str:
"""simple docstring"""
@dataclass
class __A :
'''simple docstring'''
__lowercase: Literal["titi", "toto", 42] = "toto"
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
snake_case_ = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
snake_case_ = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
def lowerCAmelCase ( self : Optional[int] ) ->Dict:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = argparse.ArgumentParser()
expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ )
expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=UpperCAmelCase_ )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase_ )
expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=UpperCAmelCase_ )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = parser.parse_args([] )
self.assertEqual(
UpperCAmelCase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , )
snake_case_ = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() )
self.assertEqual(UpperCAmelCase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) )
def lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
"""simple docstring"""
snake_case_ = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ )
expected.add_argument("""--bar""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , help="""help message""" )
expected.add_argument("""--baz""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ )
expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ )
expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ )
snake_case_ = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(UpperCAmelCase_ )
for dataclass_type in dataclass_types:
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = parser.parse_args([] )
self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , bar=UpperCAmelCase_ , baz=UpperCAmelCase_ , ces=[] , des=[] ) )
snake_case_ = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() )
self.assertEqual(UpperCAmelCase_ , Namespace(foo=12 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) )
def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = argparse.ArgumentParser()
expected.add_argument("""--required_list""" , nargs="""+""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ )
expected.add_argument("""--required_str""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase_ , )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase_ , )
expected.add_argument("""--opt""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ )
expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase_ , help="""help message""" )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase_ )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : Dict ) ->Tuple:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
snake_case_ = parser.parse_dict(UpperCAmelCase_ )[0]
snake_case_ = BasicExample(**UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
"""extra""": 42,
}
self.assertRaises(UpperCAmelCase_ , parser.parse_dict , UpperCAmelCase_ , allow_extra_keys=UpperCAmelCase_ )
def lowerCAmelCase ( self : Any ) ->Dict:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = os.path.join(UpperCAmelCase_ , """temp_json""" )
os.mkdir(UpperCAmelCase_ )
with open(temp_local_path + """.json""" , """w+""" ) as f:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = parser.parse_yaml_file(Path(temp_local_path + """.json""" ) )[0]
snake_case_ = BasicExample(**UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = os.path.join(UpperCAmelCase_ , """temp_yaml""" )
os.mkdir(UpperCAmelCase_ )
with open(temp_local_path + """.yaml""" , """w+""" ) as f:
yaml.dump(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0]
snake_case_ = BasicExample(**UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : Dict ) ->Any:
"""simple docstring"""
        parser = HfArgumentParser(TrainingArguments )
        self.assertIsNotNone(parser )
| 347 | 1 |
"""simple docstring"""
class Graph:
    '''simple docstring'''
    def __init__( self ):
        """simple docstring"""
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}
    def add_vertex( self , vertex ):
        """simple docstring"""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1
    def add_edge( self , head , tail , weight ):
        """simple docstring"""
        self.add_vertex(head )
        self.add_vertex(tail )
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight
    def distinct_weight( self ):
        """simple docstring"""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight) )
        for i in range(len(edges ) ):
            edges[i] = list(edges[i] )
        edges.sort(key=lambda e : e[2] )
        for i in range(len(edges ) - 1 ):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight
    def __str__( self ) ->str:
        """simple docstring"""
        string = """"""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += F"""{head} -> {tail} == {weight}\n"""
        return string.rstrip("""\n""" )
    def get_edges( self ):
        """simple docstring"""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]) )
        return output
    def get_vertices( self ):
        """simple docstring"""
        return self.adjacency.keys()
    @staticmethod
    def build( vertices=None , edges=None ):
        """simple docstring"""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex )
        for edge in edges:
            g.add_edge(*edge )
        return g
    class UnionFind:
        '''simple docstring'''
        def __init__( self ):
            """simple docstring"""
            self.parent = {}
            self.rank = {}
        def __len__( self ):
            """simple docstring"""
            return len(self.parent )
        def make_set( self , item ):
            """simple docstring"""
            if item in self.parent:
                return self.find(item )
            self.parent[item] = item
            self.rank[item] = 0
            return item
        def find( self , item ):
            """simple docstring"""
            if item not in self.parent:
                return self.make_set(item )
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item] )
            return self.parent[item]
        def union( self , item1 , item2 ):
            """simple docstring"""
            root1 = self.find(item1 )
            root2 = self.find(item2 )
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None
    @staticmethod
    def boruvka_mst( graph ):
        """simple docstring"""
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight) )
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head )
                set2 = union_find.find(tail )
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head ) != union_find.find(tail ):
                        union_find.union(head , tail )
                        mst_edges.append(cheap_edge[vertex] )
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges )
        return mst
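# Minimal usage sketch of the Boruvka MST above (hypothetical vertices/edges):
#
#   g = Graph.build(vertices=[1, 2, 3, 4], edges=[[1, 2, 1], [2, 3, 2], [3, 4, 1], [4, 1, 3]])
#   g.distinct_weight()            # the algorithm assumes distinct edge weights
#   mst = Graph.boruvka_mst(g)
#   print(mst)                     # prints each MST edge as "head -> tail == weight"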
| 347 |
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model( model , bnb_quantization_config , weights_location = None , device_map = None , no_split_module_classes = None , max_memory = None , offload_folder = None , offload_state_dict = False , ):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            """You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
            """ make sure you have the latest version of `bitsandbytes` installed.""" )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            """You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
            """make sure you have the latest version of `bitsandbytes` installed.""" )
    modules_on_cpu = []
    # custom device map
    if isinstance(device_map , dict ) and len(device_map.keys() ) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model )
    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu )
    modules_to_not_convert = bnb_quantization_config.skip_modules
    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules )
    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit
    model_device = get_parameter_device(model )
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            """It is not recommended to quantize a loaded model. """
            """The model should be instantiated under the `init_empty_weights` context manager.""" )
        model = replace_with_bnb_layers(model , bnb_quantization_config , modules_to_not_convert=modules_to_not_convert )
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules ):
                param.to(torch.float32 )
                if param.dtype != torch.float32:
                    name = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" )
                    param = getattr(model , name , None )
                    if param is not None:
                        param.to(torch.float32 )
            elif torch.is_floating_point(param ):
                param.to(dtype )
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device() )
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device() )
        else:
            raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
        logger.info(
            f"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
            """We move the model to cuda.""" )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model , bnb_quantization_config , modules_to_not_convert=modules_to_not_convert )
        device_map = get_quantized_model_device_map(
            model , bnb_quantization_config , device_map , max_memory=max_memory , no_split_module_classes=no_split_module_classes , )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True
        offload = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
        load_checkpoint_in_model(
            model , weights_location , device_map , dtype=bnb_quantization_config.torch_dtype , offload_folder=offload_folder , offload_state_dict=offload_state_dict , keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules , offload_8bit_bnb=load_in_8bit and offload , )
        return dispatch_model(model , device_map=device_map , offload_dir=offload_folder )
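# Hedged usage sketch (assumes a standard `transformers`-style model; the config values are illustrative):
#
#   with init_empty_weights():
#       empty_model = MyModel(config)  # hypothetical model class
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#   model = load_and_quantize_model(
#       empty_model, bnb_config, weights_location="path/to/weights", device_map="auto"
#   )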
def get_quantized_model_device_map( model , bnb_quantization_config , device_map=None , max_memory=None , no_split_module_classes=None ):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"""""": torch.cuda.current_device()}
        else:
            raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
        logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
    if isinstance(device_map , str ):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                """If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
                """'sequential'.""" )
        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules )
            } )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules )
            } )
        kwargs = {}
        kwargs["""special_dtypes"""] = special_dtypes
        kwargs["""no_split_module_classes"""] = no_split_module_classes
        kwargs["""dtype"""] = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model , low_zero=(device_map == """balanced_low_0""") , max_memory=max_memory , **kwargs , )
        kwargs["""max_memory"""] = max_memory
        device_map = infer_auto_device_map(model , **kwargs )
    if isinstance(device_map , dict ):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """ )
                else:
                    logger.info(
                        """Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None ):
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model , bnb_quantization_config , modules_to_not_convert , current_key_name )
    if not has_been_replaced:
        logger.warning(
            """You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
            """ this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
            """ Please double check your model architecture, or submit an issue on github if you think this is"""
            """ a bug.""" )
    return model
def _replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None , ):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name )
        if isinstance(module , nn.Linear ) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = """.""".join(current_key_name )
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fp16_weights=False , threshold=bnb_quantization_config.llm_int8_threshold , )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_4bit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant , quant_type=bnb_quantization_config.bnb_4bit_quant_type , )
                else:
                    raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False )
                setattr(model , name , bnb_module )
                has_been_replaced = True
        if len(list(module.children() ) ) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module , bnb_quantization_config , modules_to_not_convert , current_key_name )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced
def get_keys_to_not_convert( model ):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_params = find_tied_parameters(tied_model )
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params , dict ):
        tied_keys = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        tied_keys = sum(tied_params , [] )
    has_tied_params = len(tied_keys ) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model , """base_model_prefix""" ):
        is_base_model = not hasattr(model , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children() )
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module ) - set(tied_keys )
    list_untouched = list(set(tied_keys ) ) + list(intersection )
    # remove ".weight" from the keys
    names_to_remove = [""".weight""", """.bias"""]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove , """""" )
        filtered_module_names.append(name )
    return filtered_module_names
def has_4bit_bnb_layers( model ):
    # Check if we have `bnb.nn.Linear4bit` layers inside the model
    for m in model.modules():
        if isinstance(m , bnb.nn.Linear4bit ):
            return True
    return False
def get_parameter_device( parameter ):
    return next(parameter.parameters() ).device
def quantize_and_offload_8bit( model , param , param_name , new_dtype , offload_folder , offload_index , fp16_statistics ):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model , param_name , 0 , dtype=new_dtype , value=param )
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(""".""" )
            for split in splits[:-1]:
                new_module = getattr(module , split )
                if new_module is None:
                    raise ValueError(f"""{module} has no attribute {split}.""" )
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name] , param_name , offload_folder , index=offload_index )
        if hasattr(module._parameters[tensor_name] , """SCB""" ):
            offload_weight(
                module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , offload_folder , index=offload_index , )
    else:
        offload_weight(param , param_name , offload_folder , index=offload_index )
        offload_weight(fp16_statistics , param_name.replace("""weight""" , """SCB""" ) , offload_folder , index=offload_index )
    set_module_tensor_to_device(model , param_name , """meta""" , dtype=new_dtype , value=torch.empty(*param.size() ) )
| 347 | 1 |
"""simple docstring"""
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
abc = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = 'EGZWVONAHDCLFQMSIPJBYUKXTR'
rotor2 = 'FOBHMDKEXQNRAULPGSJVTYICZW'
rotor3 = 'ZJXESIUQLHAVRMDOYGTNFWPBKC'
# reflector --------------------------
reflector = {
'A': 'N',
'N': 'A',
'B': 'O',
'O': 'B',
'C': 'P',
'P': 'C',
'D': 'Q',
'Q': 'D',
'E': 'R',
'R': 'E',
'F': 'S',
'S': 'F',
'G': 'T',
'T': 'G',
'H': 'U',
'U': 'H',
'I': 'V',
'V': 'I',
'J': 'W',
'W': 'J',
'K': 'X',
'X': 'K',
'L': 'Y',
'Y': 'L',
'M': 'Z',
'Z': 'M',
}
# -------------------------- extra rotors --------------------------
rotor4 = 'RMDJXFUWGISLHVTCQNKYPBEZOA'
rotor5 = 'SGLCPQWZHKXAREONTFBVIYJUDM'
rotor6 = 'HVSICLTYKQUBXDWAJZOMFGPREN'
rotor7 = 'RZWQHFMVDBKICJLNTUXAGYPSOE'
rotor8 = 'LFKIJODBEGAMQPXVUHYSTCZRWN'
rotor9 = 'KOAEGVDHXPQZMLFTYWJNBRCIUS'
def _validator( rotpos: RotorPositionT , rotsel: RotorSelectionT , pb: str ) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel ) )) < 3:
        raise Exception(f"""Please use 3 unique rotors (not {unique_rotsel})""" )
    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc ):
        raise ValueError(f"""First rotor position is not within range of 1..26 ({rotorpos1})""" )
    if not 0 < rotorpos2 <= len(abc ):
        raise ValueError(f"""Second rotor position is not within range of 1..26 ({rotorpos2})""" )
    if not 0 < rotorpos3 <= len(abc ):
        raise ValueError(f"""Third rotor position is not within range of 1..26 ({rotorpos3})""" )
    # Validates string and returns dict
    pbdict = _plugboard(pb )
    return rotpos, rotsel, pbdict
def _plugboard( pbstring: str ) -> dict[str, str]:
    # tests the input string if it
    # a) is type string
    # b) has even length (so pairs can be made)
    if not isinstance(pbstring , str ):
        raise TypeError(f"""Plugboard setting isn't type string ({type(pbstring )})""" )
    elif len(pbstring ) % 2 != 0:
        raise Exception(f"""Odd number of symbols ({len(pbstring )})""" )
    elif pbstring == "":
        return {}
    pbstring = pbstring.replace(""" """ , """""" )
    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            raise Exception(f"""'{i}' not in list of symbols""" )
        elif i in tmppbl:
            raise Exception(f"""Duplicate symbol ({i})""" )
        else:
            tmppbl.add(i )
    del tmppbl
    # Created the dictionary
    pb = {}
    for j in range(0 , len(pbstring ) - 1 , 2 ):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]
    return pb
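# Example: _plugboard("POLAND") returns {'P': 'O', 'O': 'P', 'L': 'A', 'A': 'L', 'N': 'D', 'D': 'N'},
# i.e. each pair of letters is wired symmetrically.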
def enigma( text: str , rotor_position: RotorPositionT , rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3) , plugb: str = "" , ) -> str:
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position , rotor_selection , plugb.upper() )
    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1
    result = []
    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # rotor ra --------------------------
            index = abc.index(symbol ) + rotorpos1
            symbol = rotor_a[index % len(abc )]
            # rotor rb --------------------------
            index = abc.index(symbol ) + rotorpos2
            symbol = rotor_b[index % len(abc )]
            # rotor rc --------------------------
            index = abc.index(symbol ) + rotorpos3
            symbol = rotor_c[index % len(abc )]
            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]
            # 2nd rotors
            symbol = abc[rotor_c.index(symbol ) - rotorpos3]
            symbol = abc[rotor_b.index(symbol ) - rotorpos2]
            symbol = abc[rotor_a.index(symbol ) - rotorpos1]
            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc ):
                rotorpos1 = 0
            rotorpos2 += 1
            if rotorpos2 >= len(abc ):
                rotorpos2 = 0
            rotorpos3 += 1
            if rotorpos3 >= len(abc ):
                rotorpos3 = 0
        # else:
        #    pass
        #    Error could be also raised
        #    raise ValueError(
        #        'Invalid symbol('+repr(symbol)+')')
        result.append(symbol )
    return "".join(result )
if __name__ == "__main__":
    message = 'This is my Python script that emulates the Enigma machine from WWII.'
    rotor_pos = (1, 1, 1)
    pb = 'pictures'
    rotor_sel = (rotor2, rotor4, rotor8)
    en = enigma(message, rotor_pos, rotor_sel, pb)
print('Encrypted message:', en)
print('Decrypted message:', enigma(en, rotor_pos, rotor_sel, pb))
| 347 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/beit-base-patch16-224-pt22k': (
'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = """beit"""
    def __init__( self , vocab_size=8_192 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=224 , patch_size=16 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 11] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=255 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig(OnnxConfig):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse("""1.11""")
    @property
    def inputs( self ) ->Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )
    @property
    def atol_for_validation( self ) ->float:
        """simple docstring"""
        return 1E-4
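# Hedged usage sketch (assumes the ONNX export utilities that `OnnxConfig` comes from):
#
#   config = BeitConfig()
#   onnx_config = BeitOnnxConfig(config)
#   assert "pixel_values" in onnx_config.inputs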
| 347 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class VisualQuestionAnsweringTool(PipelineTool):
    '''simple docstring'''
    default_checkpoint = """dandelin/vilt-b32-finetuned-vqa"""
    description = (
        """This is a tool that answers a question about an image. It takes an input named `image` which should be the """
        """image containing the information, as well as a `question` which should be the question in English. It """
        """returns a text that is the answer to the question."""
    )
    name = """image_qa"""
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering
    inputs = ["""image""", """text"""]
    outputs = ["""text"""]
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ["""vision"""] )
        super().__init__(*args , **kwargs )
    def encode( self , image: "Image" , question: str ):
        """simple docstring"""
        return self.pre_processor(image , question , return_tensors="""pt""" )
    def forward( self , inputs ):
        """simple docstring"""
        with torch.no_grad():
            return self.model(**inputs ).logits
    def decode( self , outputs ):
        """simple docstring"""
        idx = outputs.argmax(-1 ).item()
        return self.model.config.id2label[idx]
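# Hedged usage sketch (assumes the agents/tools runtime that `PipelineTool` comes from):
#
#   from PIL import Image
#   tool = VisualQuestionAnsweringTool()
#   image = Image.open("photo.jpg")            # hypothetical local file
#   answer = tool(image, "What is on the table?")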
| 347 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
}
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
SPIECE_UNDERLINE = '▁'
class T5Tokenizer(PreTrainedTokenizer):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , extra_ids=100 , additional_special_tokens=None , sp_model_kwargs: Optional[Dict[str, Any]] = None , legacy=True , **kwargs , ) ->None:
        """simple docstring"""
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [F"""<extra_id_{i}>""" for i in range(extra_ids )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x : bool("""extra_id""" in str(x ) ) , additional_special_tokens ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
                    """ provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
                    """ tokens""" )
        if legacy:
            logger.warning_once(
                F"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"""
                """ read the related pull request available at https://github.com/huggingface/transformers/pull/24565""" )
        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , extra_ids=extra_ids , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , legacy=legacy , **kwargs , )
        self.vocab_file = vocab_file
        self._extra_ids = extra_ids
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @staticmethod
    def _eventually_correct_t5_max_length( pretrained_model_name_or_path , max_model_length , init_max_model_length ):
        """simple docstring"""
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    """This tokenizer was incorrectly instantiated with a model max length of"""
                    F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
                    """ behavior is kept to avoid breaking backwards compatibility when padding/encoding with"""
                    """ `truncation is True`.\n- Be aware that you SHOULD NOT rely on"""
                    F""" {pretrained_model_name_or_path} automatically truncating your input to"""
                    F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
                    F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
                    """ `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"""
                    """ instantiate this tokenizer with `model_max_length` set to your preferred value.""" , FutureWarning , )
        return max_model_length
    @property
    def vocab_size( self ):
        """simple docstring"""
        return self.sp_model.get_piece_size() + self._extra_ids
    def get_vocab( self ):
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) ->List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0 )) + [1]
        return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
    def get_sentinel_tokens( self ):
        """simple docstring"""
        return list(
            set(filter(lambda token : bool(re.search(R"""<extra_id_\d+>""" , token ) ) is not None , self.additional_special_tokens ) ) )
    def get_sentinel_token_ids( self ):
        """simple docstring"""
        return [self._convert_token_to_id(token ) for token in self.get_sentinel_tokens()]
    def _add_eos_if_not_present( self , token_ids: List[int] ) ->List[int]:
        """simple docstring"""
        if len(token_ids ) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                F"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"""
                """ eos tokens being added.""" )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
def lowerCAmelCase ( self : str , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]:
"""simple docstring"""
snake_case_ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
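    # T5 does not use token type ids, so the mask is all zeros whether one or
    # two sequences (each terminated by EOS) are passed.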
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]:
"""simple docstring"""
snake_case_ = self._add_eos_if_not_present(UpperCAmelCase_ )
if token_ids_a is None:
return token_ids_a
else:
snake_case_ = self._add_eos_if_not_present(UpperCAmelCase_ )
return token_ids_a + token_ids_a
def __getstate__( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
snake_case_ = self.__dict__.copy()
snake_case_ = None
return state
def __setstate__( self : Optional[Any] , UpperCAmelCase_ : List[Any] ) ->List[Any]:
"""simple docstring"""
snake_case_ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
snake_case_ = {}
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase ( self : int , UpperCAmelCase_ : "TextInput" , **UpperCAmelCase_ : Tuple ) ->List[str]:
"""simple docstring"""
if not self.legacy:
            snake_case_ = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE , """ """ )
return super().tokenize(UpperCAmelCase_ , **UpperCAmelCase_ )
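    # In non-legacy mode a leading SPIECE_UNDERLINE is prepended so the first
    # word is encoded like any other; the piece-level method below strips the
    # artefact again once SentencePiece has produced its pieces.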
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Any ) ->Tuple:
"""simple docstring"""
if not self.legacy:
            snake_case_ = text.startswith(SPIECE_UNDERLINE )
if is_first:
snake_case_ = text[1:]
        snake_case_ = self.sp_model.encode(UpperCAmelCase_ , out_type=str )
        if not self.legacy and not is_first and not text.startswith(""" """ ) and tokens[0].startswith(SPIECE_UNDERLINE ):
snake_case_ = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : List[Any] ) ->Tuple:
"""simple docstring"""
if token.startswith("""<extra_id_""" ):
snake_case_ = re.match(R"""<extra_id_(\d+)>""" , UpperCAmelCase_ )
snake_case_ = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(UpperCAmelCase_ )
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : Optional[Any] ) ->List[Any]:
"""simple docstring"""
if index < self.sp_model.get_piece_size():
snake_case_ = self.sp_model.IdToPiece(UpperCAmelCase_ )
else:
snake_case_ = F"""<extra_id_{self.vocab_size - 1 - index}>"""
return token
def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : List[str] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = []
snake_case_ = """"""
snake_case_ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCAmelCase_ ) + token
snake_case_ = True
snake_case_ = []
else:
current_sub_tokens.append(UpperCAmelCase_ )
snake_case_ = False
out_string += self.sp_model.decode(UpperCAmelCase_ )
return out_string.strip()
def lowerCAmelCase ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ) ->Tuple[str]:
"""simple docstring"""
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case_ = os.path.join(
UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase_ , """wb""" ) as fi:
snake_case_ = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase_ )
return (out_vocab_file,)
| 347 | 1 |
"""simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
__SCREAMING_SNAKE_CASE : List[str] = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
class __A (snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Dict = BartphoTokenizer
__lowercase: Dict = False
__lowercase: Optional[int] = True
def lowerCAmelCase ( self : str ) ->Any:
"""simple docstring"""
super().setUp()
snake_case_ = ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""]
snake_case_ = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
snake_case_ = {"""unk_token""": """<unk>"""}
snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""monolingual_vocab_file"""] )
with open(self.monolingual_vocab_file , """w""" , encoding="""utf-8""" ) as fp:
for token in vocab_tokens:
fp.write(F"""{token} {vocab_tokens[token]}\n""" )
snake_case_ = BartphoTokenizer(UpperCAmelCase_ , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
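    # The Bartpho tokenizer pairs a shared SentencePiece BPE model with a small
    # monolingual vocab file; pieces missing from that vocab resolve to <unk>,
    # which the "là" -> <unk> expectations below rely on.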
def lowerCAmelCase ( self : str , **UpperCAmelCase_ : Optional[int] ) ->Any:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowerCAmelCase ( self : Any , UpperCAmelCase_ : Optional[Any] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = """This is a là test"""
snake_case_ = """This is a<unk><unk> test"""
return input_text, output_text
def lowerCAmelCase ( self : Optional[int] ) ->int:
"""simple docstring"""
snake_case_ = BartphoTokenizer(UpperCAmelCase_ , self.monolingual_vocab_file , **self.special_tokens_map )
snake_case_ = """This is a là test"""
snake_case_ = """▁This ▁is ▁a ▁l à ▁t est""".split()
snake_case_ = tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = tokens + [tokenizer.unk_token]
snake_case_ = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , UpperCAmelCase_ )
| 347 |
"""simple docstring"""
def solution ( limit: int = 1_000_000 ) -> int:
    phi = [i - 1 for i in range(limit + 1 )]
    for i in range(2 , limit + 1 ):
        if phi[i] == i - 1:
            for j in range(2 * i , limit + 1 , i ):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
| 347 | 1 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Tuple = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
__SCREAMING_SNAKE_CASE : List[Any] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
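# MAPPING translates fairseq parameter names into their Hugging Face
# counterparts; keys listed in TOP_LEVEL_KEYS live on the model root and are
# therefore not prefixed with `wav2vec2.` during conversion.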
def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]:
snake_case_ = {}
with open(_SCREAMING_SNAKE_CASE , """r""" ) as file:
for line_number, line in enumerate(_SCREAMING_SNAKE_CASE ):
snake_case_ = line.strip()
if line:
snake_case_ = line.split()
snake_case_ = line_number
snake_case_ = words[0]
snake_case_ = value
return result
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
for attribute in key.split(""".""" ):
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_SCREAMING_SNAKE_CASE ):
snake_case_ = PARAM_MAPPING[full_name.split(""".""" )[-1]]
snake_case_ = """param"""
if weight_type is not None and weight_type != "param":
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape
elif weight_type is not None and weight_type == "param":
snake_case_ = hf_pointer
for attribute in hf_param_name.split(""".""" ):
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = shape_pointer.shape
# let's reduce dimension
snake_case_ = value[0]
else:
snake_case_ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
snake_case_ = value
elif weight_type == "weight_g":
snake_case_ = value
elif weight_type == "weight_v":
snake_case_ = value
elif weight_type == "bias":
snake_case_ = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = value
else:
snake_case_ = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
snake_case_ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_SCREAMING_SNAKE_CASE ):
snake_case_ = PARAM_MAPPING[full_name.split(""".""" )[-1]]
snake_case_ = """param"""
if weight_type is not None and weight_type != "param":
snake_case_ = """.""".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
snake_case_ = """.""".join([key, hf_param_name] )
else:
snake_case_ = key
snake_case_ = value if """lm_head""" in full_key else value[0]
__SCREAMING_SNAKE_CASE : int = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
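# PARAM_MAPPING resolves fairseq's flat adapter tensors (W_a, b_a, ln_W, ...)
# onto the named linear/norm attributes used on the Hugging Face side; the
# `weight_type == "param"` branches below handle these entries.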
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> List[str]:
snake_case_ = False
for key, mapped_key in MAPPING.items():
snake_case_ = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
snake_case_ = True
if "*" in mapped_key:
snake_case_ = name.split(_SCREAMING_SNAKE_CASE )[0].split(""".""" )[-2]
snake_case_ = mapped_key.replace("""*""" , _SCREAMING_SNAKE_CASE )
if "weight_g" in name:
snake_case_ = """weight_g"""
elif "weight_v" in name:
snake_case_ = """weight_v"""
elif "bias" in name:
snake_case_ = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case_ = """weight"""
else:
snake_case_ = None
if hf_dict is not None:
rename_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return is_used
return is_used
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
snake_case_ = []
snake_case_ = fairseq_model.state_dict()
snake_case_ = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
snake_case_ = False
if "conv_layers" in name:
load_conv_layer(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == """group""" , )
snake_case_ = True
else:
snake_case_ = load_wavaveca_layer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if not is_used:
unused_weights.append(_SCREAMING_SNAKE_CASE )
logger.warning(f"""Unused weights: {unused_weights}""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
snake_case_ = full_name.split("""conv_layers.""" )[-1]
snake_case_ = name.split(""".""" )
snake_case_ = int(items[0] )
snake_case_ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
snake_case_ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
snake_case_ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
snake_case_ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
snake_case_ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_SCREAMING_SNAKE_CASE )
@torch.no_grad()
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False ) -> int:
if config_path is not None:
snake_case_ = WavaVecaConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
else:
snake_case_ = WavaVecaConfig()
if is_seq_class:
snake_case_ = read_txt_into_dict(_SCREAMING_SNAKE_CASE )
snake_case_ = idalabel
snake_case_ = WavaVecaForSequenceClassification(_SCREAMING_SNAKE_CASE )
snake_case_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , )
feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE )
elif is_finetuned:
if dict_path:
snake_case_ = Dictionary.load(_SCREAMING_SNAKE_CASE )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
snake_case_ = target_dict.pad_index
snake_case_ = target_dict.bos_index
snake_case_ = target_dict.eos_index
snake_case_ = len(target_dict.symbols )
snake_case_ = os.path.join(_SCREAMING_SNAKE_CASE , """vocab.json""" )
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_SCREAMING_SNAKE_CASE ) )
return
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
snake_case_ = target_dict.indices
# fairseq has the <pad> and <s> switched
snake_case_ = 0
snake_case_ = 1
with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = WavaVecaCTCTokenizer(
_SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_SCREAMING_SNAKE_CASE , )
snake_case_ = True if config.feat_extract_norm == """layer""" else False
snake_case_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , )
snake_case_ = WavaVecaProcessor(feature_extractor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
processor.save_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ = WavaVecaForCTC(_SCREAMING_SNAKE_CASE )
else:
snake_case_ = WavaVecaForPreTraining(_SCREAMING_SNAKE_CASE )
if is_finetuned or is_seq_class:
snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
snake_case_ = argparse.Namespace(task="""audio_pretraining""" )
snake_case_ = fairseq.tasks.setup_task(_SCREAMING_SNAKE_CASE )
snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_SCREAMING_SNAKE_CASE )
snake_case_ = model[0].eval()
recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , not is_finetuned )
hf_wavavec.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
__SCREAMING_SNAKE_CASE : Any = parser.parse_args()
__SCREAMING_SNAKE_CASE : List[Any] = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 347 |
"""simple docstring"""
from __future__ import annotations
def print_distance ( distance: list[float] , src ) -> None:
    print(f"""Vertex\tShortest Distance from vertex {src}""" )
    for i, d in enumerate(distance ):
        print(f"""{i}\t\t{d}""" )
def check_negative_cycle ( graph: list[dict[str, int]] , distance: list[float] , edge_count: int ) -> bool:
    for j in range(edge_count ):
        u , v , w = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
        if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford ( graph: list[dict[str, int]] , vertex_count: int , edge_count: int , src: int ) -> list[float]:
    distance = [float("""inf""" )] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1 ):
        for j in range(edge_count ):
            u , v , w = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
            if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph , distance , edge_count )
    if negative_cycle_exists:
        raise Exception("""Negative cycle found""" )
    return distance
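# Hand-checkable example: edges 0->1 (w=2), 1->2 (w=3) and 0->2 (w=10), given
# as {"src": ..., "dst": ..., "weight": ...} dicts, yield
# bellman_ford(graph, 3, 3, 0) == [0.0, 2.0, 5.0].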
if __name__ == "__main__":
import doctest
doctest.testmod()
    V = int(input('Enter number of vertices: ').strip())
    E = int(input('Enter number of edges: ').strip())
    graph: list[dict[str, int]] = [{} for _ in range(E)]
    for i in range(E):
        print('Edge ', i + 1)
        src, dest, weight = (
            int(x)
            for x in input('Enter source, destination, weight: ').strip().split(' ')
        )
        graph[i] = {'src': src, 'dst': dest, 'weight': weight}
    source = int(input('\nEnter shortest path source:').strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 347 | 1 |
"""simple docstring"""
from __future__ import annotations
def kmp ( pattern: str , text: str ) -> bool:
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern )
    # 2) Step through text searching for pattern
    i , j = 0, 0  # index into text, pattern
    while i < len(text ):
        if pattern[j] == text[i]:
            if j == (len(pattern ) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
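# failure[k] is the length of the longest proper prefix of pattern[: k + 1]
# that is also a suffix of it; on a mismatch kmp() resumes matching from
# failure[j - 1] instead of restarting the pattern at index 0.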
def get_failure_array ( pattern: str ) -> list[int]:
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern ):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i )
    return failure
if __name__ == "__main__":
# Test 1)
    pattern = 'abc1abc12'
    text1 = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
    text2 = 'alskfjaldsk23adsfabcabc'
    assert kmp(pattern, text1) and not kmp(pattern, text2)
    # Test 2)
    pattern = 'ABABX'
    text = 'ABABZABABYABABX'
    assert kmp(pattern, text)
    # Test 3)
    pattern = 'AAAB'
    text = 'ABAAAAAB'
    assert kmp(pattern, text)
    # Test 4)
    pattern = 'abcdabcy'
    text = 'abcxabcdabxabcdabcdabcy'
    assert kmp(pattern, text)
    # Test 5)
    pattern = 'aabaabaaa'
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 347 |
"""simple docstring"""
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
__SCREAMING_SNAKE_CASE : List[str] = logging.getLogger(__name__)
__SCREAMING_SNAKE_CASE : str = tf.data.AUTOTUNE
def _a ( ) -> List[str]:
snake_case_ = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" )
parser.add_argument(
"""--pretrained_model_config""" , type=_SCREAMING_SNAKE_CASE , default="""roberta-base""" , help="""The model config to use. Note that we don't copy the model's weights, only the config!""" , )
parser.add_argument(
"""--tokenizer""" , type=_SCREAMING_SNAKE_CASE , default="""unigram-tokenizer-wikitext""" , help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""" , )
parser.add_argument(
"""--per_replica_batch_size""" , type=_SCREAMING_SNAKE_CASE , default=8 , help="""Batch size per TPU core.""" , )
parser.add_argument(
"""--no_tpu""" , action="""store_true""" , help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""" , )
parser.add_argument(
"""--tpu_name""" , type=_SCREAMING_SNAKE_CASE , help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""" , default="""local""" , )
parser.add_argument(
"""--tpu_zone""" , type=_SCREAMING_SNAKE_CASE , help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" , )
parser.add_argument(
"""--gcp_project""" , type=_SCREAMING_SNAKE_CASE , help="""Google cloud project name. Only used for non-Colab TPU nodes.""" )
parser.add_argument(
"""--bfloat16""" , action="""store_true""" , help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""" , )
parser.add_argument(
"""--train_dataset""" , type=_SCREAMING_SNAKE_CASE , help="""Path to training dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--shuffle_buffer_size""" , type=_SCREAMING_SNAKE_CASE , default=2**18 , help="""Size of the shuffle buffer (in samples)""" , )
parser.add_argument(
"""--eval_dataset""" , type=_SCREAMING_SNAKE_CASE , help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--num_epochs""" , type=_SCREAMING_SNAKE_CASE , default=1 , help="""Number of epochs to train for.""" , )
parser.add_argument(
"""--learning_rate""" , type=_SCREAMING_SNAKE_CASE , default=1E-4 , help="""Learning rate to use for training.""" , )
parser.add_argument(
"""--weight_decay_rate""" , type=_SCREAMING_SNAKE_CASE , default=1E-3 , help="""Weight decay rate to use for training.""" , )
parser.add_argument(
"""--max_length""" , type=_SCREAMING_SNAKE_CASE , default=512 , help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""" , )
parser.add_argument(
"""--mlm_probability""" , type=_SCREAMING_SNAKE_CASE , default=0.15 , help="""Fraction of tokens to mask during training.""" , )
parser.add_argument("""--output_dir""" , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="""Path to save model checkpoints to.""" )
parser.add_argument("""--hub_model_id""" , type=_SCREAMING_SNAKE_CASE , help="""Model ID to upload to on the Hugging Face Hub.""" )
snake_case_ = parser.parse_args()
return args
def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
try:
if args.tpu_name:
snake_case_ = tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
snake_case_ = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
"""Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """
"""--gcp_project. When running on a TPU VM, use --tpu_name local.""" )
tf.config.experimental_connect_to_cluster(_SCREAMING_SNAKE_CASE )
tf.tpu.experimental.initialize_tpu_system(_SCREAMING_SNAKE_CASE )
return tpu
def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]:
snake_case_ = 0
for file in file_list:
snake_case_ = file.split("""/""" )[-1]
snake_case_ = re.search(r"""-\d+-(\d+)\.tfrecord""" , _SCREAMING_SNAKE_CASE ).group(1 )
snake_case_ = int(_SCREAMING_SNAKE_CASE )
num_samples += sample_count
return num_samples
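# Shard filenames are expected to end in `-<shard>-<num_samples>.tfrecord`,
# so the total sample count can be read off without opening the records.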
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]:
snake_case_ = count_samples(_SCREAMING_SNAKE_CASE )
snake_case_ = tf.data.Dataset.from_tensor_slices(_SCREAMING_SNAKE_CASE )
if shuffle:
snake_case_ = dataset.shuffle(len(_SCREAMING_SNAKE_CASE ) )
snake_case_ = tf.data.TFRecordDataset(_SCREAMING_SNAKE_CASE , num_parallel_reads=_SCREAMING_SNAKE_CASE )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
snake_case_ = dataset.apply(tf.data.experimental.assert_cardinality(_SCREAMING_SNAKE_CASE ) )
snake_case_ = dataset.map(_SCREAMING_SNAKE_CASE , num_parallel_calls=_SCREAMING_SNAKE_CASE )
if shuffle:
assert shuffle_buffer_size is not None
snake_case_ = dataset.shuffle(args.shuffle_buffer_size )
snake_case_ = dataset.batch(_SCREAMING_SNAKE_CASE , drop_remainder=_SCREAMING_SNAKE_CASE )
snake_case_ = dataset.map(_SCREAMING_SNAKE_CASE , num_parallel_calls=_SCREAMING_SNAKE_CASE )
snake_case_ = dataset.prefetch(_SCREAMING_SNAKE_CASE )
return dataset
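# Pipeline order matters here: file names are shuffled cheaply up front,
# decoded samples are shuffled again in a bounded buffer, and masking runs
# after batching so the collator masks a whole batch per call.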
def _a ( _SCREAMING_SNAKE_CASE ) -> List[Any]:
if not args.no_tpu:
snake_case_ = initialize_tpu(_SCREAMING_SNAKE_CASE )
snake_case_ = tf.distribute.TPUStrategy(_SCREAMING_SNAKE_CASE )
else:
snake_case_ = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" )
if args.bfloataa:
tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" )
snake_case_ = AutoTokenizer.from_pretrained(args.tokenizer )
snake_case_ = AutoConfig.from_pretrained(args.pretrained_model_config )
snake_case_ = tokenizer.vocab_size
snake_case_ = tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) )
if not training_records:
raise ValueError(f"""No .tfrecord files found in {args.train_dataset}.""" )
snake_case_ = tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) )
if not eval_records:
raise ValueError(f"""No .tfrecord files found in {args.eval_dataset}.""" )
snake_case_ = count_samples(_SCREAMING_SNAKE_CASE )
snake_case_ = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
snake_case_ = steps_per_epoch * args.num_epochs
with strategy.scope():
snake_case_ = TFAutoModelForMaskedLM.from_config(_SCREAMING_SNAKE_CASE )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
snake_case_ , snake_case_ = create_optimizer(
num_train_steps=_SCREAMING_SNAKE_CASE , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=_SCREAMING_SNAKE_CASE , metrics=["""accuracy"""] )
def decode_fn(_SCREAMING_SNAKE_CASE ):
snake_case_ = {
"""input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
"""attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
snake_case_ = DataCollatorForLanguageModeling(
tokenizer=_SCREAMING_SNAKE_CASE , mlm_probability=args.mlm_probability , mlm=_SCREAMING_SNAKE_CASE , return_tensors="""tf""" )
def mask_with_collator(_SCREAMING_SNAKE_CASE ):
# TF really needs an isin() function
snake_case_ = (
~tf.cast(batch["""attention_mask"""] , tf.bool )
| (batch["""input_ids"""] == tokenizer.cls_token_id)
| (batch["""input_ids"""] == tokenizer.sep_token_id)
)
snake_case_ , snake_case_ = data_collator.tf_mask_tokens(
batch["""input_ids"""] , vocab_size=len(_SCREAMING_SNAKE_CASE ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=_SCREAMING_SNAKE_CASE , )
return batch
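    # mask_with_collator flags padding, CLS and SEP positions as "special" so
    # that tf_mask_tokens never selects them for MLM masking.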
snake_case_ = args.per_replica_batch_size * strategy.num_replicas_in_sync
snake_case_ = prepare_dataset(
_SCREAMING_SNAKE_CASE , decode_fn=_SCREAMING_SNAKE_CASE , mask_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , shuffle=_SCREAMING_SNAKE_CASE , shuffle_buffer_size=args.shuffle_buffer_size , )
snake_case_ = prepare_dataset(
_SCREAMING_SNAKE_CASE , decode_fn=_SCREAMING_SNAKE_CASE , mask_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , shuffle=_SCREAMING_SNAKE_CASE , )
snake_case_ = []
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=_SCREAMING_SNAKE_CASE ) )
model.fit(
_SCREAMING_SNAKE_CASE , validation_data=_SCREAMING_SNAKE_CASE , epochs=args.num_epochs , callbacks=_SCREAMING_SNAKE_CASE , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Union[str, Any] = parse_args()
main(args)
| 347 | 1 |
"""simple docstring"""
from __future__ import annotations
END = '#'
class Trie:
    '''simple docstring'''
    def __init__( self ) ->None:
        """simple docstring"""
        self._trie = {}
    def insert_word( self , text: str ) ->None:
        """simple docstring"""
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True
    def find_word( self , prefix: str ) ->tuple | list:
        """simple docstring"""
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie )
    def _elements( self , d: dict ) ->tuple:
        """simple docstring"""
        result = []
        for c, v in d.items():
            sub_result = [""" """] if c == END else [(c + s) for s in self._elements(v )]
            result.extend(sub_result )
        return tuple(result )
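# find_word("de") walks to the node for "de" and enumerates every stored
# completion; a " " element in a result marks that the prefix itself is a
# complete word (the END sentinel was reached).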
trie = Trie()
words = ('depart', 'detergent', 'daring', 'dog', 'deer', 'deal')
for word in words:
trie.insert_word(word)
def autocomplete_using_trie ( string: str ) -> tuple:
    suffixes = trie.find_word(string )
    return tuple(string + word for word in suffixes )
def main ( ) -> None:
print(autocomplete_using_trie("""de""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 347 |
"""simple docstring"""
def speed_of_sound_in_a_fluid ( density: float , bulk_modulus: float ) -> float:
    if density <= 0:
        raise ValueError("""Impossible fluid density""" )
    if bulk_modulus <= 0:
        raise ValueError("""Impossible bulk modulus""" )
    return (bulk_modulus / density) ** 0.5
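# Newton-Laplace relation c = sqrt(K / rho): e.g. water with K ~ 2.15e9 Pa and
# rho ~ 1000 kg/m^3 gives roughly 1.47e3 m/s.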
if __name__ == "__main__":
import doctest
doctest.testmod()
| 347 | 1 |
"""simple docstring"""
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation ( cells: list[list[int]] ) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells ) ):
        next_generation_row = []
        for j in range(len(cells[i] ) ):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i] ) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells ) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells ) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells ) - 1 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            # Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1 )
            else:
                next_generation_row.append(0 )
        next_generation.append(next_generation_row )
    return next_generation
def generate_images ( cells: list[list[int]] , frames: int ) -> list[Image.Image]:
    images = []
    for _ in range(frames ):
        # Create output image
        img = Image.new("""RGB""" , (len(cells[0] ), len(cells )) )
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells ) ):
            for y in range(len(cells[0] ) ):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img )
        cells = new_generation(cells )
    return images
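# Each frame maps one cell to one pixel (white = dead, black = alive); the
# __main__ block below stitches the frames into an animated GIF.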
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save('out.gif', save_all=True, append_images=images[1:])
| 347 |
"""simple docstring"""
def is_palindrome ( num: int ) -> bool:
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
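# e.g. is_palindrome(121) -> True, is_palindrome(-121) -> False, and
# is_palindrome(10) -> False because the reversal drops the trailing zero.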
if __name__ == "__main__":
import doctest
doctest.testmod()
| 347 | 1 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
modified_files = subprocess.check_output(f"""git diff --name-only {fork_point_sha}""".split()).decode('utf-8').split()
joined_dirs = '|'.join(sys.argv[1:])
regex = re.compile(Rf"""^({joined_dirs}).*?\.py$""")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
| 347 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
__SCREAMING_SNAKE_CASE : Tuple = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
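# The fixture is a character-level SentencePiece model, which lets the tests
# below assert exact piece sequences and a small, fixed vocab size.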
@require_sentencepiece
@require_tokenizers
class __A (snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Tuple = SpeechTaTokenizer
__lowercase: int = False
__lowercase: List[str] = True
def lowerCAmelCase ( self : Any ) ->str:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ = SpeechTaTokenizer(UpperCAmelCase_ )
snake_case_ = AddedToken("""<mask>""" , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ )
snake_case_ = mask_token
tokenizer.add_special_tokens({"""mask_token""": mask_token} )
tokenizer.add_tokens(["""<ctc_blank>"""] )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) ->Dict:
"""simple docstring"""
snake_case_ = """this is a test"""
snake_case_ = """this is a test"""
return input_text, output_text
def lowerCAmelCase ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Tuple=20 , UpperCAmelCase_ : Dict=5 ) ->List[Any]:
"""simple docstring"""
snake_case_ , snake_case_ = self.get_input_output_texts(UpperCAmelCase_ )
snake_case_ = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
snake_case_ = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ )
return text, ids
def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = """<pad>"""
snake_case_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) , UpperCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) , UpperCAmelCase_ )
def lowerCAmelCase ( self : int ) ->str:
"""simple docstring"""
snake_case_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-4] , """œ""" )
self.assertEqual(vocab_keys[-2] , """<mask>""" )
self.assertEqual(vocab_keys[-1] , """<ctc_blank>""" )
self.assertEqual(len(UpperCAmelCase_ ) , 81 )
def lowerCAmelCase ( self : Optional[int] ) ->int:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
"""simple docstring"""
snake_case_ = self.get_tokenizers(do_lower_case=UpperCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
snake_case_ = tokenizer.vocab_size
snake_case_ = len(UpperCAmelCase_ )
self.assertNotEqual(UpperCAmelCase_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
snake_case_ = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
snake_case_ = tokenizer.add_tokens(UpperCAmelCase_ )
snake_case_ = tokenizer.vocab_size
snake_case_ = len(UpperCAmelCase_ )
self.assertNotEqual(UpperCAmelCase_ , 0 )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , len(UpperCAmelCase_ ) )
self.assertEqual(UpperCAmelCase_ , all_size + len(UpperCAmelCase_ ) )
snake_case_ = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=UpperCAmelCase_ )
self.assertGreaterEqual(len(UpperCAmelCase_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
snake_case_ = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
snake_case_ = tokenizer.add_special_tokens(UpperCAmelCase_ )
snake_case_ = tokenizer.vocab_size
snake_case_ = len(UpperCAmelCase_ )
self.assertNotEqual(UpperCAmelCase_ , 0 )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , len(UpperCAmelCase_ ) )
self.assertEqual(UpperCAmelCase_ , all_size_a + len(UpperCAmelCase_ ) )
snake_case_ = tokenizer.encode(
""">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=UpperCAmelCase_ )
self.assertGreaterEqual(len(UpperCAmelCase_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
pass
def lowerCAmelCase ( self : List[str] ) ->Optional[Any]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : List[str] ) ->List[str]:
"""simple docstring"""
snake_case_ = self.get_tokenizer()
snake_case_ = tokenizer.tokenize("""This is a test""" )
# fmt: off
self.assertListEqual(UpperCAmelCase_ , [SPIECE_UNDERLINE, """T""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """a""", SPIECE_UNDERLINE, """t""", """e""", """s""", """t"""] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
snake_case_ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """92000""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
snake_case_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ )
# fmt: off
self.assertListEqual(UpperCAmelCase_ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
snake_case_ = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ )
self.assertListEqual(
UpperCAmelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """<unk>""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
@slow
def lowerCAmelCase ( self : str ) ->Dict:
"""simple docstring"""
snake_case_ = [
"""Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """
"""general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural """
"""Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """
"""models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""",
"""BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """
"""conditioning on both left and right context in all layers.""",
"""The quick brown fox jumps over the lazy dog.""",
]
# fmt: off
snake_case_ = {
"""input_ids""": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ , model_name="""microsoft/speecht5_asr""" , revision="""c5ef64c71905caeccde0e4462ef3f9077224c524""" , sequences=UpperCAmelCase_ , )
| 347 | 1 |
"""simple docstring"""
import math
def is_prime ( number: int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All prime numbers are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution ( ratio: float = 0.1 ) -> int:
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
            primes += is_prime(i )
        j += 2
    return j
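# j is the current spiral side length, so the diagonals hold 2 * j - 1 values;
# range(j * j + j + 1, (j + 2) ** 2, j + 1) visits exactly the three
# non-square corners of the next layer, and the loop stops once primes make up
# less than `ratio` of all diagonal entries (Project Euler 58).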
if __name__ == "__main__":
import doctest
doctest.testmod()
| 347 |
"""simple docstring"""
import datasets
__SCREAMING_SNAKE_CASE : Tuple = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n'
__SCREAMING_SNAKE_CASE : Dict = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'
__SCREAMING_SNAKE_CASE : List[str] = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n'
def simple_accuracy ( preds , labels ) -> float:
    return (preds == labels).mean()
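# `predictions` and `references` arrive as NumPy arrays (format="numpy"
# below), so an elementwise comparison plus .mean() yields plain accuracy.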
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __A (datasets.Metric):
'''simple docstring'''
def lowerCAmelCase ( self : str ) ->Any:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any ) ->int:
"""simple docstring"""
return {"accuracy": simple_accuracy(UpperCAmelCase_ , UpperCAmelCase_ )}
| 347 | 1 |
"""simple docstring"""
from __future__ import annotations
def prime_factors ( n: int ) -> list[int]:
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i )
    if n > 1:
        factors.append(n )
    return factors
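# Trial division up to sqrt(n): e.g. prime_factors(360) == [2, 2, 2, 3, 3, 5].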
if __name__ == "__main__":
import doctest
doctest.testmod()
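# A runnable restatement of the trial-division loop above, on an assumed input:
# 360 = 2**3 * 3**2 * 5, so factors come out in nondecreasing order with multiplicity.
def _prime_factors_sketch(n: int) -> list[int]:
    factors, divisor = [], 2
    while divisor * divisor <= n:
        if n % divisor:
            divisor += 1
        else:
            n //= divisor
            factors.append(divisor)
    if n > 1:
        factors.append(n)
    return factors
assert _prime_factors_sketch(360) == [2, 2, 2, 3, 3, 5]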
| 347 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: List[Any] = ["""sentencepiece"""]
def __init__( self : int , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[str] ) ->List[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Optional[int] = ["""sentencepiece"""]
def __init__( self : Optional[int] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Tuple ) ->Dict:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Any = ["""sentencepiece"""]
def __init__( self : Any , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ) ->List[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Dict = ["""sentencepiece"""]
def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int ) ->Optional[int]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: List[str] = ["""sentencepiece"""]
def __init__( self : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ) ->List[str]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict ) ->Optional[int]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : int , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Union[str, Any] ) ->Dict:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Optional[Any] = ["""sentencepiece"""]
def __init__( self : List[str] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Optional[Any] ) ->Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Optional[int] = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[Any] ) ->Tuple:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Any = ["""sentencepiece"""]
def __init__( self : List[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : int ) ->List[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Any = ["""sentencepiece"""]
def __init__( self : int , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[Any] ) ->Tuple:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : Optional[int] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Tuple = ["""sentencepiece"""]
def __init__( self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ) ->Dict:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Union[str, Any] = ["""sentencepiece"""]
def __init__( self : List[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[Any] ) ->Optional[int]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : int , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[str] ) ->Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Tuple = ["""sentencepiece"""]
def __init__( self : int , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Optional[Any] ) ->str:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: List[Any] = ["""sentencepiece"""]
def __init__( self : Dict , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[Any] ) ->int:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Union[str, Any] = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int] ) ->Optional[int]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Any = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict ) ->Dict:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Any = ["""sentencepiece"""]
def __init__( self : List[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[str] ) ->List[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[Any] ) ->List[str]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[Any] ) ->Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: List[Any] = ["""sentencepiece"""]
def __init__( self : Any , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[Any] ) ->List[str]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Dict = ["""sentencepiece"""]
def __init__( self : int , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Union[str, Any] ) ->List[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: List[Any] = ["""sentencepiece"""]
def __init__( self : List[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Union[str, Any] = ["""sentencepiece"""]
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : str ) ->Optional[int]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Optional[int] = ["""sentencepiece"""]
def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[int] ) ->Dict:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Dict = ["""sentencepiece"""]
def __init__( self : Optional[int] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[str] ) ->Optional[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int] ) ->Any:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: List[str] = ["""sentencepiece"""]
def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Any = ["""sentencepiece"""]
def __init__( self : List[str] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int] ) ->str:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
| 347 | 1 |
"""simple docstring"""
import requests
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> None:
snake_case_ = {"""Content-Type""": """application/json"""}
snake_case_ = requests.post(_SCREAMING_SNAKE_CASE , json={"""text""": message_body} , headers=_SCREAMING_SNAKE_CASE )
if response.status_code != 200:
snake_case_ = (
"""Request to slack returned an error """
f"""{response.status_code}, the response is:\n{response.text}"""
)
raise ValueError(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
| 347 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
__SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
class __A (snake_case__):
'''simple docstring'''
def __init__( self : str , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int ) ->None:
"""simple docstring"""
warnings.warn(
"""The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use MobileViTImageProcessor instead.""" , UpperCAmelCase_ , )
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
| 347 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __A (unittest.TestCase):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : Union[str, Any] ) ->List[str]:
"""simple docstring"""
snake_case_ = XLMRobertaModel.from_pretrained("""xlm-roberta-base""" )
snake_case_ = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]] )
# The dog is cute and lives in the garden house
snake_case_ = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim
snake_case_ = torch.tensor(
[[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
snake_case_ = model(UpperCAmelCase_ )["""last_hidden_state"""].detach()
self.assertEqual(output.shape , UpperCAmelCase_ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , UpperCAmelCase_ , atol=1E-3 ) )
@slow
def lowerCAmelCase ( self : List[Any] ) ->Tuple:
"""simple docstring"""
snake_case_ = XLMRobertaModel.from_pretrained("""xlm-roberta-large""" )
snake_case_ = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]] )
# The dog is cute and lives in the garden house
snake_case_ = torch.Size((1, 12, 1_024) ) # batch_size, sequence_length, embedding_vector_dim
snake_case_ = torch.tensor(
[[-0.0_699, -0.0_318, 0.0_705, -0.1_241, 0.0_999, -0.0_520, 0.1_004, -0.1_838, -0.4_704, 0.1_437, 0.0_821, 0.0_126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
snake_case_ = model(UpperCAmelCase_ )["""last_hidden_state"""].detach()
self.assertEqual(output.shape , UpperCAmelCase_ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , UpperCAmelCase_ , atol=1E-3 ) )
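# Hedged note on the slice check above: output[:, :, -1] keeps only the last
# embedding dimension per token, so a (1, 12, hidden) tensor compares as (1, 12).
if is_torch_available():
    assert torch.zeros(1, 12, 768)[:, :, -1].shape == (1, 12)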
| 347 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Any:
snake_case_ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
snake_case_ = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Tuple:
for i in range(config.num_hidden_layers ):
if base_model:
snake_case_ = """"""
else:
snake_case_ = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case_ = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
snake_case_ = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
snake_case_ = in_proj_weight[
: config.hidden_size, :
]
snake_case_ = in_proj_bias[: config.hidden_size]
snake_case_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case_ = in_proj_weight[
-config.hidden_size :, :
]
snake_case_ = in_proj_bias[-config.hidden_size :]
def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]:
snake_case_ = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
snake_case_ = dct.pop(_SCREAMING_SNAKE_CASE )
snake_case_ = val
def _a ( ) -> Any:
snake_case_ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case_ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
snake_case_ = ViTConfig()
snake_case_ = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
snake_case_ = True
snake_case_ = int(vit_name[-12:-10] )
snake_case_ = int(vit_name[-9:-6] )
else:
snake_case_ = 1_000
snake_case_ = """huggingface/label-files"""
snake_case_ = """imagenet-1k-id2label.json"""
snake_case_ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) )
snake_case_ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
snake_case_ = idalabel
snake_case_ = {v: k for k, v in idalabel.items()}
snake_case_ = int(vit_name[-6:-4] )
snake_case_ = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("""tiny""" ):
snake_case_ = 192
snake_case_ = 768
snake_case_ = 12
snake_case_ = 3
elif vit_name[9:].startswith("""small""" ):
snake_case_ = 384
snake_case_ = 1_536
snake_case_ = 12
snake_case_ = 6
else:
pass
else:
if vit_name[4:].startswith("""small""" ):
snake_case_ = 768
snake_case_ = 2_304
snake_case_ = 8
snake_case_ = 8
elif vit_name[4:].startswith("""base""" ):
pass
elif vit_name[4:].startswith("""large""" ):
snake_case_ = 1_024
snake_case_ = 4_096
snake_case_ = 24
snake_case_ = 16
elif vit_name[4:].startswith("""huge""" ):
snake_case_ = 1_280
snake_case_ = 5_120
snake_case_ = 32
snake_case_ = 16
# load original model from timm
snake_case_ = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
snake_case_ = timm_model.state_dict()
if base_model:
remove_classification_head_(_SCREAMING_SNAKE_CASE )
snake_case_ = create_rename_keys(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
read_in_q_k_v(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# load HuggingFace model
if vit_name[-5:] == "in21k":
snake_case_ = ViTModel(_SCREAMING_SNAKE_CASE ).eval()
else:
snake_case_ = ViTForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
model.load_state_dict(_SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
snake_case_ = DeiTImageProcessor(size=config.image_size )
else:
snake_case_ = ViTImageProcessor(size=config.image_size )
snake_case_ = image_processor(images=prepare_img() , return_tensors="""pt""" )
snake_case_ = encoding["""pixel_values"""]
snake_case_ = model(_SCREAMING_SNAKE_CASE )
if base_model:
snake_case_ = timm_model.forward_features(_SCREAMING_SNAKE_CASE )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_SCREAMING_SNAKE_CASE , outputs.pooler_output , atol=1E-3 )
else:
snake_case_ = timm_model(_SCREAMING_SNAKE_CASE )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_SCREAMING_SNAKE_CASE , outputs.logits , atol=1E-3 )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
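# Hedged mini-demo of the rename_key helper above: pop the tensor stored under the
# old key and re-insert it under the new one (a toy dict stands in for a state_dict).
_state = {"norm.weight": [1.0]}
_state["vit.layernorm.weight"] = _state.pop("norm.weight")
assert "norm.weight" not in _state and _state["vit.layernorm.weight"] == [1.0]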
| 347 | 1 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __A (snake_case__):
'''simple docstring'''
__lowercase: Dict = ["""image_processor""", """tokenizer"""]
__lowercase: Dict = """ViltImageProcessor"""
__lowercase: List[Any] = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : str , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Dict=None , **UpperCAmelCase_ : Any ) ->List[str]:
"""simple docstring"""
snake_case_ = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , UpperCAmelCase_ , )
snake_case_ = kwargs.pop("""feature_extractor""" )
snake_case_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = self.image_processor
def __call__( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase_ : Union[bool, str, TruncationStrategy] = None , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , **UpperCAmelCase_ : Any , ) ->BatchEncoding:
"""simple docstring"""
snake_case_ = self.tokenizer(
text=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , stride=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , return_special_tokens_mask=UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , return_length=UpperCAmelCase_ , verbose=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ , )
# add pixel_values + pixel_mask
snake_case_ = self.image_processor(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ )
encoding.update(UpperCAmelCase_ )
return encoding
def lowerCAmelCase ( self : Dict , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Optional[Any] ) ->int:
"""simple docstring"""
return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
def lowerCAmelCase ( self : Any , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Tuple ) ->int:
"""simple docstring"""
return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
@property
def lowerCAmelCase ( self : Optional[int] ) ->Optional[int]:
"""simple docstring"""
snake_case_ = self.tokenizer.model_input_names
snake_case_ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowerCAmelCase ( self : str ) ->Optional[Any]:
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , UpperCAmelCase_ , )
return self.image_processor_class
@property
def lowerCAmelCase ( self : List[str] ) ->List[Any]:
"""simple docstring"""
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , UpperCAmelCase_ , )
return self.image_processor
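# Hedged illustration of the dedup in `model_input_names` above: dict.fromkeys
# preserves first-seen order, so keys shared by the tokenizer and the image
# processor are listed only once.
_names = list(dict.fromkeys(["input_ids", "pixel_values", "input_ids"]))
assert _names == ["input_ids", "pixel_values"]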
| 347 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class __A (unittest.TestCase):
'''simple docstring'''
def __init__( self : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple=13 , UpperCAmelCase_ : List[Any]=7 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Dict=99 , UpperCAmelCase_ : str=32 , UpperCAmelCase_ : Tuple=5 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : Any=37 , UpperCAmelCase_ : int="gelu" , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Dict=512 , UpperCAmelCase_ : Optional[Any]=16 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : str=4 , ) ->Tuple:
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_attention_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_choices
def lowerCAmelCase ( self : Optional[int] ) ->str:
"""simple docstring"""
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_attention_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase ( self : List[str] ) ->Dict:
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class __A (snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Union[str, Any] = True
__lowercase: int = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
snake_case_ = FlaxRoFormerModelTester(self )
@slow
def lowerCAmelCase ( self : Any ) ->List[str]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
snake_case_ = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=UpperCAmelCase_ )
snake_case_ = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase_ )
@require_flax
class __A (unittest.TestCase):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : str ) ->Dict:
"""simple docstring"""
snake_case_ = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
snake_case_ = jnp.array([[0, 1, 2, 3, 4, 5]] )
snake_case_ = model(UpperCAmelCase_ )[0]
snake_case_ = 50_000
snake_case_ = (1, 6, vocab_size)
self.assertEqual(output.shape , UpperCAmelCase_ )
snake_case_ = jnp.array(
[[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
| 347 | 1 |
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = {
0: '0',
1: '1',
2: '2',
3: '3',
4: '4',
5: '5',
6: '6',
7: '7',
8: '8',
9: '9',
10: 'a',
11: 'b',
12: 'c',
13: 'd',
14: 'e',
15: 'f',
}
def _a ( _SCREAMING_SNAKE_CASE ) -> str:
assert type(_SCREAMING_SNAKE_CASE ) in (int, float) and decimal == int(_SCREAMING_SNAKE_CASE )
snake_case_ = int(_SCREAMING_SNAKE_CASE )
snake_case_ = """"""
snake_case_ = False
if decimal < 0:
snake_case_ = True
decimal *= -1
while decimal > 0:
snake_case_ , snake_case_ = divmod(_SCREAMING_SNAKE_CASE , 16 )
snake_case_ = values[remainder] + hexadecimal
snake_case_ = """0x""" + hexadecimal
if negative:
snake_case_ = """-""" + hexadecimal
return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
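# Hedged check of the divmod step above, with assumed sample values: each pass
# peels off one hex digit, least significant first (255 -> "ff", i.e. 15 and 15).
assert divmod(255, 16) == (15, 15)
assert format(-42, "x") == "-2a"  # stdlib reference point for the sign handling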
| 347 |
"""simple docstring"""
from __future__ import annotations
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> bool:
    # 1) Construct the failure array for the pattern
    snake_case_ = get_failure_array(_SCREAMING_SNAKE_CASE )
    # 2) Step through text searching for pattern
snake_case_ , snake_case_ = 0, 0 # index into text, pattern
while i < len(_SCREAMING_SNAKE_CASE ):
if pattern[j] == text[i]:
if j == (len(_SCREAMING_SNAKE_CASE ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
snake_case_ = failure[j - 1]
continue
i += 1
return False
def _a ( _SCREAMING_SNAKE_CASE ) -> list[int]:
snake_case_ = [0]
snake_case_ = 0
snake_case_ = 1
while j < len(_SCREAMING_SNAKE_CASE ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
snake_case_ = failure[i - 1]
continue
j += 1
failure.append(_SCREAMING_SNAKE_CASE )
return failure
if __name__ == "__main__":
# Test 1)
__SCREAMING_SNAKE_CASE : Optional[int] = 'abc1abc12'
__SCREAMING_SNAKE_CASE : Optional[int] = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
__SCREAMING_SNAKE_CASE : List[str] = 'alskfjaldsk23adsfabcabc'
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
__SCREAMING_SNAKE_CASE : int = 'ABABX'
__SCREAMING_SNAKE_CASE : Optional[Any] = 'ABABZABABYABABX'
assert kmp(pattern, text)
# Test 3)
__SCREAMING_SNAKE_CASE : Any = 'AAAB'
__SCREAMING_SNAKE_CASE : List[Any] = 'ABAAAAAB'
assert kmp(pattern, text)
# Test 4)
__SCREAMING_SNAKE_CASE : Optional[int] = 'abcdabcy'
__SCREAMING_SNAKE_CASE : str = 'abcxabcdabxabcdabcdabcy'
assert kmp(pattern, text)
# Test 5)
__SCREAMING_SNAKE_CASE : Any = 'aabaabaaa'
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
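# Hedged illustration of what the failure array encodes, reusing Test 5's pattern:
# failure[5] == 3 because the length-6 prefix "aabaab" has "aab" as both a proper
# prefix and a suffix, so a mismatch there can resume matching from index 3.
_prefix = 'aabaabaaa'[:6]
assert _prefix[:3] == _prefix[-3:] == 'aab'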
| 347 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger('transformers.models.encodec')
__SCREAMING_SNAKE_CASE : List[Any] = {
'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
__SCREAMING_SNAKE_CASE : List[Any] = {
'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
'encoder.model.13.lstm': 'encoder.layers.13.lstm',
'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
__SCREAMING_SNAKE_CASE : int = {
'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
'decoder.model.1.lstm': 'decoder.layers.1.lstm',
'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
__SCREAMING_SNAKE_CASE : List[Any] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
__SCREAMING_SNAKE_CASE : Tuple = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
__SCREAMING_SNAKE_CASE : List[Any] = []
__SCREAMING_SNAKE_CASE : Optional[Any] = []
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
for attribute in key.split(""".""" ):
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if weight_type is not None:
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape
else:
snake_case_ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
snake_case_ = value
elif weight_type == "weight_g":
snake_case_ = value
elif weight_type == "weight_v":
snake_case_ = value
elif weight_type == "bias":
snake_case_ = value
elif weight_type == "running_mean":
snake_case_ = value
elif weight_type == "running_var":
snake_case_ = value
elif weight_type == "num_batches_tracked":
snake_case_ = value
elif weight_type == "weight_ih_l0":
snake_case_ = value
elif weight_type == "weight_hh_l0":
snake_case_ = value
elif weight_type == "bias_ih_l0":
snake_case_ = value
elif weight_type == "bias_hh_l0":
snake_case_ = value
elif weight_type == "weight_ih_l1":
snake_case_ = value
elif weight_type == "weight_hh_l1":
snake_case_ = value
elif weight_type == "bias_ih_l1":
snake_case_ = value
elif weight_type == "bias_hh_l1":
snake_case_ = value
else:
snake_case_ = value
logger.info(f"""{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
for key in ignore_keys:
if key.endswith(""".*""" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
snake_case_ , snake_case_ = key.split(""".*.""" )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
snake_case_ = []
if model_name == "encodec_24khz" or "encodec_32khz":
snake_case_ = MAPPING_24K
elif model_name == "encodec_48khz":
snake_case_ = MAPPING_48K
else:
raise ValueError(f"""Unsupported model: {model_name}""" )
for name, value in orig_dict.items():
if should_ignore(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
logger.info(f"""{name} was ignored""" )
continue
snake_case_ = False
for key, mapped_key in MAPPING.items():
if "*" in key:
snake_case_ , snake_case_ = key.split(""".*.""" )
if prefix in name and suffix in name:
snake_case_ = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith("""embed""" ) and name.endswith("""embed_avg""" ):
continue
snake_case_ = True
if "*" in mapped_key:
snake_case_ = name.split(_SCREAMING_SNAKE_CASE )[0].split(""".""" )[-2]
snake_case_ = mapped_key.replace("""*""" , _SCREAMING_SNAKE_CASE )
if "weight_g" in name:
snake_case_ = """weight_g"""
elif "weight_v" in name:
snake_case_ = """weight_v"""
elif "weight_ih_l0" in name:
snake_case_ = """weight_ih_l0"""
elif "weight_hh_l0" in name:
snake_case_ = """weight_hh_l0"""
elif "bias_ih_l0" in name:
snake_case_ = """bias_ih_l0"""
elif "bias_hh_l0" in name:
snake_case_ = """bias_hh_l0"""
elif "weight_ih_l1" in name:
snake_case_ = """weight_ih_l1"""
elif "weight_hh_l1" in name:
snake_case_ = """weight_hh_l1"""
elif "bias_ih_l1" in name:
snake_case_ = """bias_ih_l1"""
elif "bias_hh_l1" in name:
snake_case_ = """bias_hh_l1"""
elif "bias" in name:
snake_case_ = """bias"""
elif "weight" in name:
snake_case_ = """weight"""
elif "running_mean" in name:
snake_case_ = """running_mean"""
elif "running_var" in name:
snake_case_ = """running_var"""
elif "num_batches_tracked" in name:
snake_case_ = """num_batches_tracked"""
else:
snake_case_ = None
set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(_SCREAMING_SNAKE_CASE )
logger.warning(f"""Unused weights: {unused_weights}""" )
@torch.no_grad()
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ) -> List[str]:
if config_path is not None:
snake_case_ = EncodecConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
else:
snake_case_ = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
snake_case_ = [8, 5, 4, 4]
snake_case_ = [2.2]
snake_case_ = 64
snake_case_ = 32_000
snake_case_ = 2_048
snake_case_ = False
snake_case_ = False
snake_case_ = False
elif model_name == "encodec_48khz":
snake_case_ = [8, 5, 4, 2]
snake_case_ = [3.0, 6.0, 12.0, 24.0]
snake_case_ = 48_000
snake_case_ = 2
snake_case_ = False
snake_case_ = """time_group_norm"""
snake_case_ = True
snake_case_ = 1.0
snake_case_ = 0.01
else:
raise ValueError(f"""Unknown model name: {model_name}""" )
snake_case_ = EncodecModel(_SCREAMING_SNAKE_CASE )
snake_case_ = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ = torch.load(_SCREAMING_SNAKE_CASE )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
snake_case_ = original_checkpoint["""best_state"""]
recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
if repo_id:
print("""Pushing to the hub...""" )
feature_extractor.push_to_hub(_SCREAMING_SNAKE_CASE )
model.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
parser.add_argument(
'--model',
default='encodec_24khz',
type=str,
help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
__SCREAMING_SNAKE_CASE : str = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
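# Hedged mini-demo of the ".*." wildcard convention used by MAPPING/should_ignore
# above: "*" stands for a layer index, so prefix/suffix containment is the match rule.
_key = 'quantizer.vq.layers.*._codebook.embed'
_prefix, _suffix = _key.split('.*.')
_name = 'quantizer.vq.layers.3._codebook.embed'
assert _prefix in _name and _suffix in _name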
| 347 |
"""simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __A (snake_case__):
'''simple docstring'''
@slow
@require_torch
def lowerCAmelCase ( self : Union[str, Any] ) ->Dict:
"""simple docstring"""
snake_case_ = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
snake_case_ = BertTokenizer.from_pretrained("""bert-base-uncased""" )
snake_case_ = bertabert.config.encoder.vocab_size
snake_case_ = tokenizer.sep_token_id
snake_case_ = tokenizer.cls_token_id
snake_case_ = 128
snake_case_ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
snake_case_ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
snake_case_ = train_dataset.select(range(32 ) )
snake_case_ = val_dataset.select(range(16 ) )
snake_case_ = 4
def _map_to_encoder_decoder_inputs(UpperCAmelCase_ : int ):
# Tokenizer will automatically set [BOS] <text> [EOS]
snake_case_ = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=UpperCAmelCase_ , max_length=512 )
snake_case_ = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=UpperCAmelCase_ , max_length=128 )
snake_case_ = inputs.input_ids
snake_case_ = inputs.attention_mask
snake_case_ = outputs.input_ids
snake_case_ = outputs.input_ids.copy()
snake_case_ = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
snake_case_ = outputs.attention_mask
assert all(len(UpperCAmelCase_ ) == 512 for x in inputs.input_ids )
assert all(len(UpperCAmelCase_ ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(UpperCAmelCase_ : Union[str, Any] ):
snake_case_ = pred.label_ids
snake_case_ = pred.predictions
# all unnecessary tokens are removed
snake_case_ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
snake_case_ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
snake_case_ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCAmelCase_ ) )] ) / len(UpperCAmelCase_ )
return {"accuracy": accuracy}
# map train dataset
snake_case_ = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , remove_columns=["""article""", """highlights"""] , )
train_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
# same for validation dataset
snake_case_ = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , remove_columns=["""article""", """highlights"""] , )
val_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
snake_case_ = self.get_auto_remove_tmp_dir()
snake_case_ = SeqaSeqTrainingArguments(
output_dir=UpperCAmelCase_ , per_device_train_batch_size=UpperCAmelCase_ , per_device_eval_batch_size=UpperCAmelCase_ , predict_with_generate=UpperCAmelCase_ , evaluation_strategy="""steps""" , do_train=UpperCAmelCase_ , do_eval=UpperCAmelCase_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
snake_case_ = SeqaSeqTrainer(
model=UpperCAmelCase_ , args=UpperCAmelCase_ , compute_metrics=_compute_metrics , train_dataset=UpperCAmelCase_ , eval_dataset=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , )
# start training
trainer.train()
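# Hedged illustration of the label masking in _map_to_encoder_decoder_inputs above:
# pad token ids become -100 so the cross-entropy loss ignores padded positions
# (pad id 0 is an assumed value for this sketch).
_pad_id = 0
_rows = [[5, 7, _pad_id, _pad_id]]
assert [[-100 if t == _pad_id else t for t in row] for row in _rows] == [[5, 7, -100, -100]]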
| 347 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
def _a ( _SCREAMING_SNAKE_CASE ) -> int:
snake_case_ = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
snake_case_ = 128
elif "12-12" in model_name:
snake_case_ = 12
snake_case_ = 12
elif "14-14" in model_name:
snake_case_ = 14
snake_case_ = 14
elif "16-16" in model_name:
snake_case_ = 16
snake_case_ = 16
else:
raise ValueError("""Model not supported""" )
snake_case_ = """huggingface/label-files"""
if "speech-commands" in model_name:
snake_case_ = 35
snake_case_ = """speech-commands-v2-id2label.json"""
else:
snake_case_ = 527
snake_case_ = """audioset-id2label.json"""
snake_case_ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) )
snake_case_ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
snake_case_ = idalabel
snake_case_ = {v: k for k, v in idalabel.items()}
return config
def _a ( _SCREAMING_SNAKE_CASE ) -> str:
if "module.v" in name:
snake_case_ = name.replace("""module.v""" , """audio_spectrogram_transformer""" )
if "cls_token" in name:
snake_case_ = name.replace("""cls_token""" , """embeddings.cls_token""" )
if "dist_token" in name:
snake_case_ = name.replace("""dist_token""" , """embeddings.distillation_token""" )
if "pos_embed" in name:
snake_case_ = name.replace("""pos_embed""" , """embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
snake_case_ = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
# transformer blocks
if "blocks" in name:
snake_case_ = name.replace("""blocks""" , """encoder.layer""" )
if "attn.proj" in name:
snake_case_ = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
snake_case_ = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
snake_case_ = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
snake_case_ = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
snake_case_ = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
snake_case_ = name.replace("""mlp.fc2""" , """output.dense""" )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
snake_case_ = name.replace("""audio_spectrogram_transformer.norm""" , """audio_spectrogram_transformer.layernorm""" )
# classifier head
if "module.mlp_head.0" in name:
snake_case_ = name.replace("""module.mlp_head.0""" , """classifier.layernorm""" )
if "module.mlp_head.1" in name:
snake_case_ = name.replace("""module.mlp_head.1""" , """classifier.dense""" )
return name
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
for key in orig_state_dict.copy().keys():
snake_case_ = orig_state_dict.pop(_SCREAMING_SNAKE_CASE )
if "qkv" in key:
snake_case_ = key.split(""".""" )
snake_case_ = int(key_split[3] )
snake_case_ = config.hidden_size
if "weight" in key:
snake_case_ = val[:dim, :]
snake_case_ = val[dim : dim * 2, :]
snake_case_ = val[-dim:, :]
else:
snake_case_ = val[:dim]
snake_case_ = val[dim : dim * 2]
snake_case_ = val[-dim:]
else:
snake_case_ = val
return orig_state_dict
def _a ( _SCREAMING_SNAKE_CASE ) -> Tuple:
snake_case_ = [
"""module.v.head.weight""",
"""module.v.head.bias""",
"""module.v.head_dist.weight""",
"""module.v.head_dist.bias""",
]
for k in ignore_keys:
state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@torch.no_grad()
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]:
snake_case_ = get_audio_spectrogram_transformer_config(_SCREAMING_SNAKE_CASE )
snake_case_ = {
"""ast-finetuned-audioset-10-10-0.4593""": (
"""https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.450""": (
"""https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448""": (
"""https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448-v2""": (
"""https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"""
),
"""ast-finetuned-audioset-12-12-0.447""": (
"""https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"""
),
"""ast-finetuned-audioset-14-14-0.443""": (
"""https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"""
),
"""ast-finetuned-audioset-16-16-0.442""": (
"""https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"""
),
"""ast-finetuned-speech-commands-v2""": (
"""https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"""
),
}
# load original state_dict
snake_case_ = model_name_to_url[model_name]
snake_case_ = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location="""cpu""" )
# remove some keys
remove_keys(_SCREAMING_SNAKE_CASE )
# rename some keys
snake_case_ = convert_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# load 🤗 model
snake_case_ = ASTForAudioClassification(_SCREAMING_SNAKE_CASE )
model.eval()
model.load_state_dict(_SCREAMING_SNAKE_CASE )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
snake_case_ = -4.267_7393 if """speech-commands""" not in model_name else -6.84_5978
snake_case_ = 4.568_9974 if """speech-commands""" not in model_name else 5.565_4526
snake_case_ = 1_024 if """speech-commands""" not in model_name else 128
snake_case_ = ASTFeatureExtractor(mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE )
if "speech-commands" in model_name:
snake_case_ = load_dataset("""speech_commands""" , """v0.02""" , split="""validation""" )
snake_case_ = dataset[0]["""audio"""]["""array"""]
else:
snake_case_ = hf_hub_download(
repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" , )
snake_case_ , snake_case_ = torchaudio.load(_SCREAMING_SNAKE_CASE )
snake_case_ = waveform.squeeze().numpy()
snake_case_ = feature_extractor(_SCREAMING_SNAKE_CASE , sampling_rate=16_000 , return_tensors="""pt""" )
# forward pass
snake_case_ = model(**_SCREAMING_SNAKE_CASE )
snake_case_ = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
snake_case_ = torch.tensor([-0.8760, -7.0042, -8.6602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
snake_case_ = torch.tensor([-1.1986, -7.0903, -8.2718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
snake_case_ = torch.tensor([-2.6128, -8.0080, -9.4344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
snake_case_ = torch.tensor([-1.5080, -7.4534, -8.8917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
snake_case_ = torch.tensor([-0.5050, -6.5833, -8.0843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
snake_case_ = torch.tensor([-0.3826, -7.0336, -8.2413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
snake_case_ = torch.tensor([-1.2113, -6.9101, -8.3470] )
elif model_name == "ast-finetuned-speech-commands-v2":
snake_case_ = torch.tensor([6.1589, -8.0566, -8.7984] )
else:
raise ValueError("""Unknown model name""" )
if not torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ):
raise ValueError("""Logits don't match""" )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(f"""Saving feature extractor to {pytorch_dump_folder_path}""" )
feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
print("""Pushing model and feature extractor to the hub...""" )
model.push_to_hub(f"""MIT/{model_name}""" )
feature_extractor.push_to_hub(f"""MIT/{model_name}""" )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='ast-finetuned-audioset-10-10-0.4593',
type=str,
help='Name of the Audio Spectrogram Transformer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
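    # illustrative invocation (script name and output path are hypothetical):
    #   python convert_ast_checkpoint.py --model_name ast-finetuned-audioset-10-10-0.4593 \
    #       --pytorch_dump_folder_path ./ast-converted --push_to_hub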
| 347 |
"""simple docstring"""
# this script reports modified .py files under the desired top-level sub-dirs, passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
__SCREAMING_SNAKE_CASE : Tuple = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
__SCREAMING_SNAKE_CASE : Tuple = subprocess.check_output(f"""git diff --name-only {fork_point_sha}""".split()).decode('utf-8').split()
__SCREAMING_SNAKE_CASE : Any = '|'.join(sys.argv[1:])
__SCREAMING_SNAKE_CASE : Optional[Any] = re.compile(Rf"""^({joined_dirs}).*?\.py$""")
__SCREAMING_SNAKE_CASE : List[str] = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
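# worked example (hypothetical): invoked with "src tests", the regex becomes
# r"^(src|tests).*?\.py$", so a modified "src/foo/bar.py" is reported while
# "docs/notes.md" and untracked files are not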
| 347 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Generic, TypeVar
__SCREAMING_SNAKE_CASE : Dict = TypeVar('T')
class __A (Generic[T]):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCAmelCase_ : T ) ->None:
"""simple docstring"""
snake_case_ = data
snake_case_ = self
snake_case_ = 0
class __A (Generic[T]):
'''simple docstring'''
def __init__( self : Tuple ) ->None:
"""simple docstring"""
snake_case_ = {}
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : T ) ->None:
"""simple docstring"""
snake_case_ = DisjointSetTreeNode(UpperCAmelCase_ )
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : T ) ->DisjointSetTreeNode[T]:
"""simple docstring"""
snake_case_ = self.map[data]
if elem_ref != elem_ref.parent:
snake_case_ = self.find_set(elem_ref.parent.data )
return elem_ref.parent
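    # descriptive note: the lookup above applies path compression — every node on the
    # way to the root is re-parented directly to it — and the linking method below
    # unions by rank, together giving near-constant amortized find/union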
def lowerCAmelCase ( self : str , UpperCAmelCase_ : DisjointSetTreeNode[T] , UpperCAmelCase_ : DisjointSetTreeNode[T] ) ->None:
"""simple docstring"""
if nodea.rank > nodea.rank:
snake_case_ = nodea
else:
snake_case_ = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : T , UpperCAmelCase_ : T ) ->None:
"""simple docstring"""
self.link(self.find_set(UpperCAmelCase_ ) , self.find_set(UpperCAmelCase_ ) )
class __A (Generic[T]):
'''simple docstring'''
def __init__( self : Optional[Any] ) ->None:
"""simple docstring"""
snake_case_ = {}
def lowerCAmelCase ( self : int , UpperCAmelCase_ : T ) ->None:
"""simple docstring"""
if node not in self.connections:
snake_case_ = {}
def lowerCAmelCase ( self : str , UpperCAmelCase_ : T , UpperCAmelCase_ : T , UpperCAmelCase_ : int ) ->None:
"""simple docstring"""
self.add_node(UpperCAmelCase_ )
self.add_node(UpperCAmelCase_ )
snake_case_ = weight
snake_case_ = weight
def lowerCAmelCase ( self : Any ) ->GraphUndirectedWeighted[T]:
"""simple docstring"""
snake_case_ = []
snake_case_ = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
edges.sort(key=lambda UpperCAmelCase_ : x[2] )
# creating the disjoint set
snake_case_ = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(UpperCAmelCase_ )
# MST generation
snake_case_ = 0
snake_case_ = 0
snake_case_ = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
snake_case_ , snake_case_ , snake_case_ = edges[index]
index += 1
snake_case_ = disjoint_set.find_set(UpperCAmelCase_ )
snake_case_ = disjoint_set.find_set(UpperCAmelCase_ )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
disjoint_set.union(UpperCAmelCase_ , UpperCAmelCase_ )
return graph
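# usage sketch (method names add_edge / kruskal are assumptions for this listing):
#   g = GraphUndirectedWeighted[int]()
#   g.add_edge(1, 2, 1); g.add_edge(2, 3, 2); g.add_edge(1, 3, 3)
#   mst = g.kruskal()  # keeps (1, 2) and (2, 3); (1, 3) would close a cycle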
| 347 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Tuple = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
__SCREAMING_SNAKE_CASE : List[Any] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]:
snake_case_ = {}
with open(_SCREAMING_SNAKE_CASE , """r""" ) as file:
for line_number, line in enumerate(_SCREAMING_SNAKE_CASE ):
snake_case_ = line.strip()
if line:
snake_case_ = line.split()
snake_case_ = line_number
snake_case_ = words[0]
snake_case_ = value
return result
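# the reader above maps the first token of each non-empty line to its zero-based line
# number, e.g. a labels file "backward\nfollow\n" yields {"backward": 0, "follow": 1}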
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
for attribute in key.split(""".""" ):
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_SCREAMING_SNAKE_CASE ):
snake_case_ = PARAM_MAPPING[full_name.split(""".""" )[-1]]
snake_case_ = """param"""
if weight_type is not None and weight_type != "param":
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape
elif weight_type is not None and weight_type == "param":
snake_case_ = hf_pointer
for attribute in hf_param_name.split(""".""" ):
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = shape_pointer.shape
# let's reduce dimension
snake_case_ = value[0]
else:
snake_case_ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
snake_case_ = value
elif weight_type == "weight_g":
snake_case_ = value
elif weight_type == "weight_v":
snake_case_ = value
elif weight_type == "bias":
snake_case_ = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = value
else:
snake_case_ = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
snake_case_ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_SCREAMING_SNAKE_CASE ):
snake_case_ = PARAM_MAPPING[full_name.split(""".""" )[-1]]
snake_case_ = """param"""
if weight_type is not None and weight_type != "param":
snake_case_ = """.""".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
snake_case_ = """.""".join([key, hf_param_name] )
else:
snake_case_ = key
snake_case_ = value if """lm_head""" in full_key else value[0]
__SCREAMING_SNAKE_CASE : int = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> List[str]:
snake_case_ = False
for key, mapped_key in MAPPING.items():
snake_case_ = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
snake_case_ = True
if "*" in mapped_key:
snake_case_ = name.split(_SCREAMING_SNAKE_CASE )[0].split(""".""" )[-2]
snake_case_ = mapped_key.replace("""*""" , _SCREAMING_SNAKE_CASE )
if "weight_g" in name:
snake_case_ = """weight_g"""
elif "weight_v" in name:
snake_case_ = """weight_v"""
elif "bias" in name:
snake_case_ = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case_ = """weight"""
else:
snake_case_ = None
if hf_dict is not None:
rename_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    return is_used
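# note: MAPPING values such as "encoder.layers.*.attention.k_proj" carry a "*"
# placeholder; the loop above substitutes the layer index parsed from the fairseq
# parameter name, e.g. "...layers.3..." becomes "encoder.layers.3.attention.k_proj"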
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
snake_case_ = []
snake_case_ = fairseq_model.state_dict()
snake_case_ = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
snake_case_ = False
if "conv_layers" in name:
load_conv_layer(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == """group""" , )
snake_case_ = True
else:
snake_case_ = load_wavaveca_layer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if not is_used:
unused_weights.append(_SCREAMING_SNAKE_CASE )
logger.warning(f"""Unused weights: {unused_weights}""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
snake_case_ = full_name.split("""conv_layers.""" )[-1]
snake_case_ = name.split(""".""" )
snake_case_ = int(items[0] )
snake_case_ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
snake_case_ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
snake_case_ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
snake_case_ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
snake_case_ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_SCREAMING_SNAKE_CASE )
@torch.no_grad()
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False ) -> int:
if config_path is not None:
snake_case_ = WavaVecaConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
else:
snake_case_ = WavaVecaConfig()
if is_seq_class:
snake_case_ = read_txt_into_dict(_SCREAMING_SNAKE_CASE )
snake_case_ = idalabel
snake_case_ = WavaVecaForSequenceClassification(_SCREAMING_SNAKE_CASE )
snake_case_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , )
feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE )
elif is_finetuned:
if dict_path:
snake_case_ = Dictionary.load(_SCREAMING_SNAKE_CASE )
            # important: change the bos & pad token ids since the CTC symbol is <pad>
            # and not <s> as in fairseq
snake_case_ = target_dict.pad_index
snake_case_ = target_dict.bos_index
snake_case_ = target_dict.eos_index
snake_case_ = len(target_dict.symbols )
snake_case_ = os.path.join(_SCREAMING_SNAKE_CASE , """vocab.json""" )
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_SCREAMING_SNAKE_CASE ) )
return
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
snake_case_ = target_dict.indices
# fairseq has the <pad> and <s> switched
snake_case_ = 0
snake_case_ = 1
with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = WavaVecaCTCTokenizer(
_SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_SCREAMING_SNAKE_CASE , )
snake_case_ = True if config.feat_extract_norm == """layer""" else False
snake_case_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , )
snake_case_ = WavaVecaProcessor(feature_extractor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
processor.save_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ = WavaVecaForCTC(_SCREAMING_SNAKE_CASE )
else:
snake_case_ = WavaVecaForPreTraining(_SCREAMING_SNAKE_CASE )
if is_finetuned or is_seq_class:
snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
snake_case_ = argparse.Namespace(task="""audio_pretraining""" )
snake_case_ = fairseq.tasks.setup_task(_SCREAMING_SNAKE_CASE )
snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_SCREAMING_SNAKE_CASE )
snake_case_ = model[0].eval()
recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , not is_finetuned )
hf_wavavec.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
__SCREAMING_SNAKE_CASE : Any = parser.parse_args()
__SCREAMING_SNAKE_CASE : List[Any] = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
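    # illustrative invocation (script name and paths are hypothetical):
    #   python convert_wav2vec2_checkpoint.py --checkpoint_path ./wav2vec_small.pt \
    #       --dict_path ./dict.ltr.txt --pytorch_dump_folder_path ./wav2vec2-converted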
| 347 | 1 |
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="""session""" )
def _a ( ) -> Any:
snake_case_ = 10
snake_case_ = datasets.Features(
{
"""tokens""": datasets.Sequence(datasets.Value("""string""" ) ),
"""labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ),
"""answers""": datasets.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
"""id""": datasets.Value("""int64""" ),
} )
snake_case_ = datasets.Dataset.from_dict(
{
"""tokens""": [["""foo"""] * 5] * n,
"""labels""": [[1] * 5] * n,
"""answers""": [{"""answer_start""": [97], """text""": ["""1976"""]}] * 10,
"""id""": list(range(_SCREAMING_SNAKE_CASE ) ),
} , features=_SCREAMING_SNAKE_CASE , )
return dataset
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
snake_case_ = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" )
dataset.map(cache_file_name=_SCREAMING_SNAKE_CASE )
return filename
# FILE_CONTENT + files
__SCREAMING_SNAKE_CASE : Tuple = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE ) -> int:
snake_case_ = tmp_path_factory.mktemp("""data""" ) / """file.txt"""
snake_case_ = FILE_CONTENT
with open(_SCREAMING_SNAKE_CASE , """w""" ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return filename
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE ) -> str:
import bza
snake_case_ = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2"""
snake_case_ = bytes(_SCREAMING_SNAKE_CASE , """utf-8""" )
with bza.open(_SCREAMING_SNAKE_CASE , """wb""" ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]:
import gzip
snake_case_ = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" )
snake_case_ = bytes(_SCREAMING_SNAKE_CASE , """utf-8""" )
with gzip.open(_SCREAMING_SNAKE_CASE , """wb""" ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE ) -> List[Any]:
if datasets.config.LZ4_AVAILABLE:
import lza.frame
snake_case_ = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4"""
snake_case_ = bytes(_SCREAMING_SNAKE_CASE , """utf-8""" )
with lza.frame.open(_SCREAMING_SNAKE_CASE , """wb""" ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
snake_case_ = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z"""
with pyazr.SevenZipFile(_SCREAMING_SNAKE_CASE , """w""" ) as archive:
archive.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
import tarfile
snake_case_ = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar"""
with tarfile.TarFile(_SCREAMING_SNAKE_CASE , """w""" ) as f:
f.add(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE ) -> Tuple:
import lzma
snake_case_ = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz"""
snake_case_ = bytes(_SCREAMING_SNAKE_CASE , """utf-8""" )
with lzma.open(_SCREAMING_SNAKE_CASE , """wb""" ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
import zipfile
snake_case_ = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip"""
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE , """w""" ) as f:
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE ) -> str:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
snake_case_ = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst"""
snake_case_ = bytes(_SCREAMING_SNAKE_CASE , """utf-8""" )
with zstd.open(_SCREAMING_SNAKE_CASE , """wb""" ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
snake_case_ = tmp_path_factory.mktemp("""data""" ) / """file.xml"""
snake_case_ = textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
with open(_SCREAMING_SNAKE_CASE , """w""" ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return filename
__SCREAMING_SNAKE_CASE : int = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
__SCREAMING_SNAKE_CASE : Any = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
__SCREAMING_SNAKE_CASE : str = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
__SCREAMING_SNAKE_CASE : Any = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
__SCREAMING_SNAKE_CASE : str = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope="""session""" )
def _a ( ) -> Dict:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE ) -> int:
snake_case_ = datasets.Dataset.from_dict(_SCREAMING_SNAKE_CASE )
snake_case_ = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" )
dataset.map(cache_file_name=_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE ) -> int:
snake_case_ = str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" )
with contextlib.closing(sqlitea.connect(_SCREAMING_SNAKE_CASE ) ) as con:
snake_case_ = con.cursor()
cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" )
for item in DATA:
cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[int]:
snake_case_ = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" )
with open(_SCREAMING_SNAKE_CASE , """w""" , newline="""""" ) as f:
snake_case_ = csv.DictWriter(_SCREAMING_SNAKE_CASE , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
snake_case_ = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" )
with open(_SCREAMING_SNAKE_CASE , """w""" , newline="""""" ) as f:
snake_case_ = csv.DictWriter(_SCREAMING_SNAKE_CASE , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
import bza
snake_case_ = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2"""
with open(_SCREAMING_SNAKE_CASE , """rb""" ) as f:
snake_case_ = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(_SCREAMING_SNAKE_CASE , """wb""" ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
snake_case_ = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE , """w""" ) as f:
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ) )
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
snake_case_ = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE , """w""" ) as f:
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) )
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) )
return path
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
snake_case_ = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE , """w""" ) as f:
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.join("""main_dir""" , os.path.basename(_SCREAMING_SNAKE_CASE ) ) )
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.join("""main_dir""" , os.path.basename(_SCREAMING_SNAKE_CASE ) ) )
return path
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE ) -> Any:
snake_case_ = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" )
snake_case_ = pa.schema(
{
"""col_1""": pa.string(),
"""col_2""": pa.intaa(),
"""col_3""": pa.floataa(),
} )
with open(_SCREAMING_SNAKE_CASE , """wb""" ) as f:
snake_case_ = pq.ParquetWriter(_SCREAMING_SNAKE_CASE , schema=_SCREAMING_SNAKE_CASE )
snake_case_ = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(_SCREAMING_SNAKE_CASE ) )] for k in DATA[0]} , schema=_SCREAMING_SNAKE_CASE )
writer.write_table(_SCREAMING_SNAKE_CASE )
writer.close()
return path
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[int]:
snake_case_ = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
snake_case_ = {"""data""": DATA}
with open(_SCREAMING_SNAKE_CASE , """w""" ) as f:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE ) -> Any:
snake_case_ = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
snake_case_ = {"""data""": DATA_DICT_OF_LISTS}
with open(_SCREAMING_SNAKE_CASE , """w""" ) as f:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[int]:
snake_case_ = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
with open(_SCREAMING_SNAKE_CASE , """w""" ) as f:
for item in DATA:
f.write(json.dumps(_SCREAMING_SNAKE_CASE ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
snake_case_ = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
with open(_SCREAMING_SNAKE_CASE , """w""" ) as f:
for item in DATA:
f.write(json.dumps(_SCREAMING_SNAKE_CASE ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE ) -> List[Any]:
snake_case_ = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
with open(_SCREAMING_SNAKE_CASE , """w""" ) as f:
for item in DATA_312:
f.write(json.dumps(_SCREAMING_SNAKE_CASE ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE ) -> str:
snake_case_ = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
with open(_SCREAMING_SNAKE_CASE , """w""" ) as f:
for item in DATA_STR:
f.write(json.dumps(_SCREAMING_SNAKE_CASE ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
import gzip
snake_case_ = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
with open(_SCREAMING_SNAKE_CASE , """rb""" ) as orig_file:
with gzip.open(_SCREAMING_SNAKE_CASE , """wb""" ) as zipped_file:
zipped_file.writelines(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
import gzip
snake_case_ = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
with open(_SCREAMING_SNAKE_CASE , """rb""" ) as orig_file:
with gzip.open(_SCREAMING_SNAKE_CASE , """wb""" ) as zipped_file:
zipped_file.writelines(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
snake_case_ = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE , """w""" ) as f:
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ) )
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
snake_case_ = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE , """w""" ) as f:
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.join("""nested""" , os.path.basename(_SCREAMING_SNAKE_CASE ) ) )
return path
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
snake_case_ = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE , """w""" ) as f:
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.join("""main_dir""" , os.path.basename(_SCREAMING_SNAKE_CASE ) ) )
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.join("""main_dir""" , os.path.basename(_SCREAMING_SNAKE_CASE ) ) )
return path
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
snake_case_ = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar"""
with tarfile.TarFile(_SCREAMING_SNAKE_CASE , """w""" ) as f:
f.add(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ) )
f.add(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
snake_case_ = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(_SCREAMING_SNAKE_CASE , """w""" ) as f:
f.add(_SCREAMING_SNAKE_CASE , arcname=os.path.join("""nested""" , os.path.basename(_SCREAMING_SNAKE_CASE ) ) )
return path
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE ) -> List[Any]:
snake_case_ = ["""0""", """1""", """2""", """3"""]
snake_case_ = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" )
with open(_SCREAMING_SNAKE_CASE , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE ) -> Tuple:
snake_case_ = ["""0""", """1""", """2""", """3"""]
snake_case_ = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" )
with open(_SCREAMING_SNAKE_CASE , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
snake_case_ = ["""0""", """1""", """2""", """3"""]
snake_case_ = tmp_path_factory.mktemp("""data""" ) / """dataset.abc"""
with open(_SCREAMING_SNAKE_CASE , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
snake_case_ = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip"""
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE , """w""" ) as f:
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ) )
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ) )
return path
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
snake_case_ = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip"""
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE , """w""" ) as f:
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.join("""main_dir""" , os.path.basename(_SCREAMING_SNAKE_CASE ) ) )
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.join("""main_dir""" , os.path.basename(_SCREAMING_SNAKE_CASE ) ) )
return path
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
snake_case_ = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip"""
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE , """w""" ) as f:
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename("""unsupported.ext""" ) )
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename("""unsupported_2.ext""" ) )
return path
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE ) -> Dict:
snake_case_ = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] )
snake_case_ = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" )
with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture(scope="""session""" )
def _a ( ) -> Optional[Any]:
return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" )
@pytest.fixture(scope="""session""" )
def _a ( ) -> Union[str, Any]:
return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" )
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
snake_case_ = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip"""
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE , """w""" ) as f:
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ) )
f.write(_SCREAMING_SNAKE_CASE , arcname=os.path.basename(_SCREAMING_SNAKE_CASE ).replace(""".jpg""" , """2.jpg""" ) )
return path
@pytest.fixture(scope="""session""" )
def _a ( _SCREAMING_SNAKE_CASE ) -> Dict:
snake_case_ = tmp_path_factory.mktemp("""data_dir""" )
(data_dir / "subdir").mkdir()
with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 10 )
with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
# hidden file
with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 10 )
with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
return data_dir
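# illustrative consumer (hypothetical test): pytest injects these fixtures by
# parameter name, e.g.
#   def test_csv_has_four_rows(csv_path):
#       import csv
#       with open(csv_path, newline="") as f:
#           assert len(list(csv.DictReader(f))) == 4  # DATA holds four records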
| 347 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class __A :
'''simple docstring'''
def __init__( self : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any=14 , UpperCAmelCase_ : Union[str, Any]=7 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : str=99 , UpperCAmelCase_ : Union[str, Any]=32 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : str=37 , UpperCAmelCase_ : Any="gelu" , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : int=512 , UpperCAmelCase_ : Tuple=0.02 , ) ->List[str]:
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = rotary_dim
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = initializer_range
snake_case_ = None
snake_case_ = vocab_size - 1
snake_case_ = vocab_size - 1
snake_case_ = vocab_size - 1
def lowerCAmelCase ( self : int ) ->Optional[int]:
"""simple docstring"""
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_input_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=UpperCAmelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def lowerCAmelCase ( self : Dict ) ->Tuple:
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def lowerCAmelCase ( self : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict ) ->Tuple:
"""simple docstring"""
snake_case_ = 20
snake_case_ = model_class_name(UpperCAmelCase_ )
snake_case_ = model.init_cache(input_ids.shape[0] , UpperCAmelCase_ )
snake_case_ = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
snake_case_ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
snake_case_ = model(
input_ids[:, :-1] , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , )
snake_case_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
snake_case_ = model(
input_ids[:, -1:] , attention_mask=UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=UpperCAmelCase_ , )
snake_case_ = model(UpperCAmelCase_ )
snake_case_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
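    # the check above primes the KV cache on tokens 0..n-2 in one pass, then decodes
    # only the final token through `past_key_values`; a max logit difference below
    # 1e-3 confirms cached incremental decoding matches the full forward pass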
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) ->Dict:
"""simple docstring"""
snake_case_ = 20
snake_case_ = model_class_name(UpperCAmelCase_ )
snake_case_ = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
snake_case_ = model.init_cache(input_ids.shape[0] , UpperCAmelCase_ )
snake_case_ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
snake_case_ = model(
input_ids[:, :-1] , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , )
snake_case_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
snake_case_ = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , )
snake_case_ = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )
snake_case_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
@require_flax
class __A (snake_case__ , snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Any = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
__lowercase: List[str] = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def lowerCAmelCase ( self : Tuple ) ->List[str]:
"""simple docstring"""
snake_case_ = FlaxGPTJModelTester(self )
def lowerCAmelCase ( self : int ) ->List[Any]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : List[str] ) ->Any:
"""simple docstring"""
for model_class_name in self.all_model_classes:
snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
@tooslow
def lowerCAmelCase ( self : List[str] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" )
snake_case_ = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ )
snake_case_ = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" )
snake_case_ = False
snake_case_ = model.config.eos_token_id
snake_case_ = jax.jit(model.generate )
snake_case_ = jit_generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences
snake_case_ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
snake_case_ = [
"""Hello this is a long string of text.\n\nI'm trying to get the text of the""",
"""Hey, I'm a little late to the party. I'm going to""",
]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
@is_pt_flax_cross_test
def lowerCAmelCase ( self : int ) ->str:
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
snake_case_ = model_class.__name__[4:] # Skip the "Flax" at the beginning
snake_case_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ , snake_case_ = pt_inputs["""input_ids"""].shape
snake_case_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCAmelCase_ ):
snake_case_ = 0
snake_case_ = 1
snake_case_ = 0
snake_case_ = 1
snake_case_ = pt_model_class(UpperCAmelCase_ ).eval()
snake_case_ = model_class(UpperCAmelCase_ , dtype=jnp.floataa )
snake_case_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCAmelCase_ )
snake_case_ = fx_state
with torch.no_grad():
snake_case_ = pt_model(**UpperCAmelCase_ ).to_tuple()
snake_case_ = fx_model(**UpperCAmelCase_ ).to_tuple()
self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(UpperCAmelCase_ )
snake_case_ = model_class.from_pretrained(UpperCAmelCase_ , from_pt=UpperCAmelCase_ )
snake_case_ = fx_model_loaded(**UpperCAmelCase_ ).to_tuple()
self.assertEqual(
len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def lowerCAmelCase ( self : List[Any] ) ->str:
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
snake_case_ = model_class.__name__[4:] # Skip the "Flax" at the beginning
snake_case_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = pt_model_class(UpperCAmelCase_ ).eval()
snake_case_ = model_class(UpperCAmelCase_ , dtype=jnp.floataa )
snake_case_ = load_flax_weights_in_pytorch_model(UpperCAmelCase_ , fx_model.params )
snake_case_ , snake_case_ = pt_inputs["""input_ids"""].shape
snake_case_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCAmelCase_ ):
snake_case_ = 0
snake_case_ = 1
snake_case_ = 0
snake_case_ = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
snake_case_ = pt_model(**UpperCAmelCase_ ).to_tuple()
snake_case_ = fx_model(**UpperCAmelCase_ ).to_tuple()
self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(UpperCAmelCase_ )
snake_case_ = pt_model_class.from_pretrained(UpperCAmelCase_ , from_flax=UpperCAmelCase_ )
with torch.no_grad():
snake_case_ = pt_model_loaded(**UpperCAmelCase_ ).to_tuple()
self.assertEqual(
len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def lowerCAmelCase ( self : List[Any] ) ->str:
"""simple docstring"""
for model_class_name in self.all_model_classes:
snake_case_ = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" )
snake_case_ = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase_ )
| 347 | 1 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __A (snake_case__):
'''simple docstring'''
__lowercase: Union[str, Any] = """ClapFeatureExtractor"""
__lowercase: Tuple = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__( self : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int ) ->Optional[int]:
"""simple docstring"""
super().__init__(UpperCAmelCase_ , UpperCAmelCase_ )
def __call__( self : Any , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Union[str, Any]=None , **UpperCAmelCase_ : Tuple ) ->Any:
"""simple docstring"""
snake_case_ = kwargs.pop("""sampling_rate""" , UpperCAmelCase_ )
if text is None and audios is None:
raise ValueError("""You have to specify either text or audios. Both cannot be none.""" )
if text is not None:
snake_case_ = self.tokenizer(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )
if audios is not None:
snake_case_ = self.feature_extractor(
UpperCAmelCase_ , sampling_rate=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )
if text is not None and audios is not None:
snake_case_ = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCAmelCase_ ) , tensor_type=UpperCAmelCase_ )
def lowerCAmelCase ( self : str , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Tuple ) ->str:
"""simple docstring"""
return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
def lowerCAmelCase ( self : List[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : int ) ->Optional[int]:
"""simple docstring"""
return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
@property
def lowerCAmelCase ( self : Any ) ->List[str]:
"""simple docstring"""
snake_case_ = self.tokenizer.model_input_names
snake_case_ = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
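# usage sketch (original class name ClapProcessor and the checkpoint id are assumptions):
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(text=["a dog barking"], audios=waveform,
#                      sampling_rate=48_000, return_tensors="pt")  # waveform: 1-D numpy array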
| 347 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
__SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
class __A (snake_case__):
'''simple docstring'''
__lowercase: int = """upernet"""
def __init__( self : str , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : str=512 , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : Optional[Any]=[1, 2, 3, 6] , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Tuple=0.4 , UpperCAmelCase_ : Tuple=384 , UpperCAmelCase_ : Union[str, Any]=256 , UpperCAmelCase_ : str=1 , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : Tuple=255 , **UpperCAmelCase_ : Dict , ) ->Union[str, Any]:
"""simple docstring"""
super().__init__(**UpperCAmelCase_ )
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
snake_case_ = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = backbone_config.get("""model_type""" )
snake_case_ = CONFIG_MAPPING[backbone_model_type]
snake_case_ = config_class.from_dict(UpperCAmelCase_ )
snake_case_ = backbone_config
snake_case_ = hidden_size
snake_case_ = initializer_range
snake_case_ = pool_scales
snake_case_ = use_auxiliary_head
snake_case_ = auxiliary_loss_weight
snake_case_ = auxiliary_in_channels
snake_case_ = auxiliary_channels
snake_case_ = auxiliary_num_convs
snake_case_ = auxiliary_concat_input
snake_case_ = loss_ignore_index
def lowerCAmelCase ( self : str ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = copy.deepcopy(self.__dict__ )
snake_case_ = self.backbone_config.to_dict()
snake_case_ = self.__class__.model_type
return output
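# usage sketch (original names UperNetConfig / .to_dict() are assumptions):
#   config = UperNetConfig()   # backbone_config=None falls back to a ResNet backbone, stages 1-4
#   d = config.to_dict()       # serializes the nested backbone_config alongside the rest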
| 347 | 1 |
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
snake_case_ = flax_key_tuple[:-1] + ("""weight""",)
snake_case_ = torch.permute(_SCREAMING_SNAKE_CASE , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_SCREAMING_SNAKE_CASE ):
# linear layer
snake_case_ = flax_key_tuple[:-1] + ("""weight""",)
snake_case_ = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
snake_case_ = flax_key_tuple[:-1] + ("""weight""",)
return flax_key_tuple, flax_tensor
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
if "metadata" in layer:
snake_case_ = layer.split("""metadata""" )
snake_case_ = """""".join(split_layer[0] )[:-1]
snake_case_ = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )]
elif "kvstore" in layer:
snake_case_ = layer.split("""kvstore""" )
snake_case_ = """""".join(split_layer[0] )[:-1]
snake_case_ = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )]
else:
snake_case_ = layer.split("""/""" )
snake_case_ = """/""".join(split_layer[:-1] )
snake_case_ = (split_layer[-1],)
if "kvstore/path" in layer:
snake_case_ = f"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
elif "kvstore/driver" in layer:
snake_case_ = """file"""
else:
snake_case_ = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
snake_case_ = rename_keys(_SCREAMING_SNAKE_CASE )
snake_case_ = {}
for k, v in current_block.items():
snake_case_ = v
snake_case_ = new_current_block
torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = WEIGHTS_NAME ) -> Optional[int]:
snake_case_ = convert_file_size_to_int(_SCREAMING_SNAKE_CASE )
snake_case_ = []
snake_case_ = {}
snake_case_ = 0
snake_case_ = 0
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp:
snake_case_ = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""]
snake_case_ = flatten_dict(_SCREAMING_SNAKE_CASE , sep="""/""" )
snake_case_ = {}
for layer in checkpoint_info.keys():
snake_case_ , snake_case_ , snake_case_ = get_key_and_tensorstore_dict(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if curr_real_layer_name in all_layers:
snake_case_ = content
else:
snake_case_ = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
snake_case_ = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
snake_case_ = torch.tensor(_SCREAMING_SNAKE_CASE )
snake_case_ = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
snake_case_ , snake_case_ = rename_base_flax_keys(tuple(key.split("""/""" ) ) , _SCREAMING_SNAKE_CASE )
snake_case_ = """/""".join(_SCREAMING_SNAKE_CASE )
        # If this weight is going to tip over the maximal shard size, we split.
if current_block_size + weight_size > max_shard_size:
snake_case_ = os.path.join(
_SCREAMING_SNAKE_CASE , weights_name.replace(""".bin""" , f"""-{len(_SCREAMING_SNAKE_CASE )+1:05d}-of-???.bin""" ) )
rename_and_save_block(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
sharded_state_dicts.append(current_block.keys() )
del current_block
snake_case_ = {}
snake_case_ = 0
snake_case_ = raw_weights.to(getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
snake_case_ = os.path.join(_SCREAMING_SNAKE_CASE , weights_name.replace(""".bin""" , f"""-{len(_SCREAMING_SNAKE_CASE )+1:05d}-of-???.bin""" ) )
rename_and_save_block(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(_SCREAMING_SNAKE_CASE ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
snake_case_ = {}
snake_case_ = {}
for idx, shard in enumerate(_SCREAMING_SNAKE_CASE ):
snake_case_ = weights_name.replace(
""".bin""" , f"""-{idx+1:05d}-of-{len(_SCREAMING_SNAKE_CASE ):05d}.bin""" ) # len(sharded_state_dicts):05d}
snake_case_ = os.path.join(_SCREAMING_SNAKE_CASE , weights_name.replace(""".bin""" , f"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
snake_case_ = shard
for key in shard:
snake_case_ = shard_file
# Add the metadata
snake_case_ = {"""total_size""": total_size}
snake_case_ = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , """w""" , encoding="""utf-8""" ) as f:
snake_case_ = json.dumps(_SCREAMING_SNAKE_CASE , indent=2 , sort_keys=_SCREAMING_SNAKE_CASE ) + """\n"""
f.write(_SCREAMING_SNAKE_CASE )
return metadata, index
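# A sketch of the index payload the function above writes, with made-up shard
# contents and sizes: "weight_map" sends each parameter name to the shard file
# holding it, and "metadata.total_size" is the summed byte size of all weights.
import json

demo_shards = [["encoder.block.0.weight"], ["decoder.block.0.weight"]]
weight_map = {}
for idx, shard in enumerate(demo_shards):
    shard_file = f"pytorch_model-{idx + 1:05d}-of-{len(demo_shards):05d}.bin"
    for key in shard:
        weight_map[key] = shard_file

index = {"metadata": {"total_size": 2 * 4096 * 4096 * 2}, "weight_map": weight_map}
print(json.dumps(index, indent=2, sort_keys=True))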
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
__SCREAMING_SNAKE_CASE : Dict = parser.parse_args()
shard_on_the_fly(
args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def _a ( ) -> Optional[int]:
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
snake_case_ = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" )
config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" )
snake_case_ = SwitchTransformersForConditionalGeneration.from_pretrained(
"""/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" )
snake_case_ = TaTokenizer.from_pretrained("""t5-small""" )
snake_case_ = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."""
snake_case_ = tokenizer(_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).input_ids
snake_case_ = model.generate(_SCREAMING_SNAKE_CASE , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 347 |
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __A (unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[int] ) ->Dict:
"""simple docstring"""
snake_case_ = """ylacombe/bark-small"""
snake_case_ = tempfile.mkdtemp()
snake_case_ = """en_speaker_1"""
snake_case_ = """This is a test string"""
snake_case_ = """speaker_embeddings_path.json"""
snake_case_ = """speaker_embeddings"""
def lowerCAmelCase ( self : List[str] , **UpperCAmelCase_ : str ) ->Optional[int]:
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase_ )
def lowerCAmelCase ( self : Any ) ->Dict:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase ( self : List[Any] ) ->Dict:
"""simple docstring"""
snake_case_ = self.get_tokenizer()
snake_case_ = BarkProcessor(tokenizer=UpperCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
snake_case_ = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def lowerCAmelCase ( self : Dict ) ->int:
"""simple docstring"""
snake_case_ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
snake_case_ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
snake_case_ = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def lowerCAmelCase ( self : Optional[Any] ) ->Any:
"""simple docstring"""
snake_case_ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
snake_case_ = 35
snake_case_ = 2
snake_case_ = 8
snake_case_ = {
"""semantic_prompt""": np.ones(UpperCAmelCase_ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
snake_case_ = processor(text=self.input_string , voice_preset=UpperCAmelCase_ )
snake_case_ = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
snake_case_ = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(UpperCAmelCase_ , **UpperCAmelCase_ )
snake_case_ = processor(text=self.input_string , voice_preset=UpperCAmelCase_ )
snake_case_ = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
snake_case_ = processor(text=self.input_string , voice_preset=self.voice_preset )
def lowerCAmelCase ( self : Tuple ) ->Dict:
"""simple docstring"""
snake_case_ = self.get_tokenizer()
snake_case_ = BarkProcessor(tokenizer=UpperCAmelCase_ )
snake_case_ = processor(text=self.input_string )
snake_case_ = tokenizer(
self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
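# A standalone sketch of the voice-preset round-trip the tests above exercise:
# a preset is just a dict of numpy arrays, so np.savez / np.load reproduce it.
# Key names match the tests; the array shapes here are illustrative.
import os
import tempfile

import numpy as np

preset = {
    "semantic_prompt": np.ones(35),
    "coarse_prompt": np.ones((2, 35)),
    "fine_prompt": np.ones((8, 35)),
}
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "file.npz")
    np.savez(path, **preset)
    loaded = np.load(path)  # an NpzFile keyed by the savez kwargs
    for key in preset:
        assert np.array_equal(preset[key], loaded[key])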
| 347 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : str = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class __A (snake_case__):
'''simple docstring'''
__lowercase: List[str] = """big_bird"""
def __init__( self : Any , UpperCAmelCase_ : Tuple=50_358 , UpperCAmelCase_ : Tuple=768 , UpperCAmelCase_ : Optional[int]=12 , UpperCAmelCase_ : Dict=12 , UpperCAmelCase_ : Optional[Any]=3_072 , UpperCAmelCase_ : Union[str, Any]="gelu_new" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Optional[int]=4_096 , UpperCAmelCase_ : Optional[Any]=2 , UpperCAmelCase_ : Union[str, Any]=0.02 , UpperCAmelCase_ : Optional[Any]=1E-12 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Any=0 , UpperCAmelCase_ : Tuple=1 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Any=66 , UpperCAmelCase_ : Any="block_sparse" , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : int=False , UpperCAmelCase_ : str=64 , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : str=None , **UpperCAmelCase_ : Optional[int] , ) ->List[str]:
"""simple docstring"""
super().__init__(
pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , sep_token_id=UpperCAmelCase_ , **UpperCAmelCase_ , )
snake_case_ = vocab_size
snake_case_ = max_position_embeddings
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = initializer_range
snake_case_ = type_vocab_size
snake_case_ = layer_norm_eps
snake_case_ = use_cache
snake_case_ = rescale_embeddings
snake_case_ = attention_type
snake_case_ = use_bias
snake_case_ = block_size
snake_case_ = num_random_blocks
snake_case_ = classifier_dropout
class __A (snake_case__):
'''simple docstring'''
@property
def lowerCAmelCase ( self : Optional[Any] ) ->Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
snake_case_ = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
snake_case_ = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 347 |
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
__SCREAMING_SNAKE_CASE : int = sys.version_info >= (3, 10)
def _a ( _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple:
return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE )
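# Why the helper above goes through default_factory: mutable defaults must be
# built per instance. A hedged restatement with readable, hypothetical names.
from dataclasses import dataclass, field

@dataclass
class Demo:
    tags: list = field(default_factory=lambda: ["a", "b"], metadata={"help": "demo"})

assert Demo().tags == ["a", "b"]
assert Demo().tags is not Demo().tags  # each instance gets a fresh list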
@dataclass
class __A :
'''simple docstring'''
__lowercase: int
__lowercase: float
__lowercase: str
__lowercase: bool
@dataclass
class __A :
'''simple docstring'''
__lowercase: int = 42
__lowercase: str = field(default="""toto""" , metadata={"""help""": """help message"""})
@dataclass
class __A :
'''simple docstring'''
__lowercase: bool = False
__lowercase: bool = True
__lowercase: Optional[bool] = None
class __A (snake_case__):
'''simple docstring'''
__lowercase: str = """titi"""
__lowercase: Any = """toto"""
class __A (snake_case__):
'''simple docstring'''
__lowercase: int = """titi"""
__lowercase: Optional[Any] = """toto"""
__lowercase: List[Any] = 42
@dataclass
class __A :
'''simple docstring'''
__lowercase: BasicEnum = "toto"
def lowerCAmelCase ( self : int ) ->List[Any]:
"""simple docstring"""
snake_case_ = BasicEnum(self.foo )
@dataclass
class __A :
'''simple docstring'''
__lowercase: MixedTypeEnum = "toto"
def lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = MixedTypeEnum(self.foo )
@dataclass
class __A :
'''simple docstring'''
__lowercase: Optional[int] = None
__lowercase: Optional[float] = field(default=snake_case__ , metadata={"""help""": """help message"""})
__lowercase: Optional[str] = None
__lowercase: Optional[List[str]] = list_field(default=[])
__lowercase: Optional[List[int]] = list_field(default=[])
@dataclass
class __A :
'''simple docstring'''
__lowercase: List[int] = list_field(default=[])
__lowercase: List[int] = list_field(default=[1, 2, 3])
__lowercase: List[str] = list_field(default=["""Hallo""", """Bonjour""", """Hello"""])
__lowercase: List[float] = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class __A :
'''simple docstring'''
__lowercase: List[int] = field()
__lowercase: str = field()
__lowercase: BasicEnum = field()
def lowerCAmelCase ( self : Any ) ->str:
"""simple docstring"""
snake_case_ = BasicEnum(self.required_enum )
@dataclass
class __A :
'''simple docstring'''
__lowercase: int
__lowercase: "BasicEnum" = field()
__lowercase: "Optional[bool]" = None
__lowercase: "str" = field(default="""toto""" , metadata={"""help""": """help message"""})
__lowercase: "List[str]" = list_field(default=["""Hallo""", """Bonjour""", """Hello"""])
if is_python_no_less_than_3_10:
@dataclass
class __A :
'''simple docstring'''
__lowercase: bool = False
__lowercase: bool = True
__lowercase: bool | None = None
@dataclass
class __A :
'''simple docstring'''
__lowercase: int | None = None
__lowercase: float | None = field(default=snake_case__ , metadata={"""help""": """help message"""})
__lowercase: str | None = None
__lowercase: list[str] | None = list_field(default=[])
__lowercase: list[int] | None = list_field(default=[])
class __A (unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : argparse.ArgumentParser , UpperCAmelCase_ : argparse.ArgumentParser ) ->Optional[int]:
"""simple docstring"""
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
snake_case_ = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k != """container"""}
snake_case_ = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k != """container"""}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("""choices""" , UpperCAmelCase_ ) and yy.get("""choices""" , UpperCAmelCase_ ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["""type"""](UpperCAmelCase_ ) , yy["""type"""](UpperCAmelCase_ ) )
del xx["type"], yy["type"]
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : int ) ->Any:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ )
expected.add_argument("""--bar""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ )
expected.add_argument("""--baz""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ )
expected.add_argument("""--flag""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""]
((snake_case_) , ) = parser.parse_args_into_dataclasses(UpperCAmelCase_ , look_for_args_file=UpperCAmelCase_ )
self.assertFalse(example.flag )
def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=42 , type=UpperCAmelCase_ )
expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase_ , help="""help message""" )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" )
expected.add_argument("""--baz""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("""--no_baz""" , action="""store_false""" , default=UpperCAmelCase_ , dest="""baz""" )
expected.add_argument("""--opt""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ )
snake_case_ = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(UpperCAmelCase_ )
for dataclass_type in dataclass_types:
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = parser.parse_args([] )
self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) )
snake_case_ = parser.parse_args(["""--foo""", """--no_baz"""] )
self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) )
snake_case_ = parser.parse_args(["""--foo""", """--baz"""] )
self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) )
snake_case_ = parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] )
self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) )
snake_case_ = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] )
self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) )
def lowerCAmelCase ( self : int ) ->List[str]:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
snake_case_ = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
snake_case_ = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
snake_case_ = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
snake_case_ = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
snake_case_ = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def lowerCAmelCase ( self : Dict ) ->str:
"""simple docstring"""
@dataclass
class __A :
'''simple docstring'''
__lowercase: Literal["titi", "toto", 42] = "toto"
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
snake_case_ = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
snake_case_ = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
def lowerCAmelCase ( self : Optional[int] ) ->Dict:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = argparse.ArgumentParser()
expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ )
expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=UpperCAmelCase_ )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase_ )
expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=UpperCAmelCase_ )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = parser.parse_args([] )
self.assertEqual(
UpperCAmelCase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , )
snake_case_ = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() )
self.assertEqual(UpperCAmelCase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) )
def lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
"""simple docstring"""
snake_case_ = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ )
expected.add_argument("""--bar""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , help="""help message""" )
expected.add_argument("""--baz""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ )
expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ )
expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ )
snake_case_ = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(UpperCAmelCase_ )
for dataclass_type in dataclass_types:
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = parser.parse_args([] )
self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , bar=UpperCAmelCase_ , baz=UpperCAmelCase_ , ces=[] , des=[] ) )
snake_case_ = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() )
self.assertEqual(UpperCAmelCase_ , Namespace(foo=12 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) )
def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = argparse.ArgumentParser()
expected.add_argument("""--required_list""" , nargs="""+""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ )
expected.add_argument("""--required_str""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase_ , )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase_ , )
expected.add_argument("""--opt""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ )
expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase_ , help="""help message""" )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase_ )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : Dict ) ->Tuple:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
snake_case_ = parser.parse_dict(UpperCAmelCase_ )[0]
snake_case_ = BasicExample(**UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
"""extra""": 42,
}
self.assertRaises(UpperCAmelCase_ , parser.parse_dict , UpperCAmelCase_ , allow_extra_keys=UpperCAmelCase_ )
def lowerCAmelCase ( self : Any ) ->Dict:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = os.path.join(UpperCAmelCase_ , """temp_json""" )
os.mkdir(UpperCAmelCase_ )
with open(temp_local_path + """.json""" , """w+""" ) as f:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = parser.parse_yaml_file(Path(temp_local_path + """.json""" ) )[0]
snake_case_ = BasicExample(**UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = os.path.join(UpperCAmelCase_ , """temp_yaml""" )
os.mkdir(UpperCAmelCase_ )
with open(temp_local_path + """.yaml""" , """w+""" ) as f:
yaml.dump(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0]
snake_case_ = BasicExample(**UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : Dict ) ->Any:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
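# A minimal end-to-end sketch of the API these tests exercise: declare a
# dataclass, hand it to HfArgumentParser, and parse an argv-style list.
# The dataclass here is hypothetical, not one of the fixtures above.
from dataclasses import dataclass

from transformers import HfArgumentParser

@dataclass
class DemoArgs:
    foo: int
    bar: float = 3.14
    flag: bool = False

demo_parser = HfArgumentParser(DemoArgs)
(demo,) = demo_parser.parse_args_into_dataclasses(["--foo", "12", "--flag", "True"])
assert demo == DemoArgs(foo=12, bar=3.14, flag=True)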
| 347 | 1 |
"""simple docstring"""
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("""dataset_size""" , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 100 * 2**20, 900 * 2**20] )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , """IN_MEMORY_MAX_SIZE""" , _SCREAMING_SNAKE_CASE )
snake_case_ = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
snake_case_ = dataset_size < in_memory_max_size
else:
snake_case_ = False
snake_case_ = is_small_dataset(_SCREAMING_SNAKE_CASE )
assert result == expected
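# The rule under test, restated directly: a dataset is "small" iff a positive
# in-memory cap is configured and the size fits strictly under it. The test
# uses monkeypatch; mutating the config module here is for illustration only.
import datasets.config
from datasets.utils.info_utils import is_small_dataset

datasets.config.IN_MEMORY_MAX_SIZE = 100 * 2**20  # assume a 100 MiB cap
assert is_small_dataset(50 * 2**20) is True
assert is_small_dataset(200 * 2**20) is False
assert is_small_dataset(None) is False  # unknown size is never "small"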
| 347 |
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__)
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , ) -> Optional[Any]:
snake_case_ = bnb_quantization_config.load_in_abit
snake_case_ = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"""You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"""You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
"""make sure you have the latest version of `bitsandbytes` installed.""" )
snake_case_ = []
# custom device map
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(device_map.keys() ) > 1:
snake_case_ = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
snake_case_ = get_keys_to_not_convert(_SCREAMING_SNAKE_CASE )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(_SCREAMING_SNAKE_CASE )
snake_case_ = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
snake_case_ = []
snake_case_ = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(_SCREAMING_SNAKE_CASE )
# compatibility with peft
snake_case_ = load_in_abit
snake_case_ = load_in_abit
snake_case_ = get_parameter_device(_SCREAMING_SNAKE_CASE )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
snake_case_ = replace_with_bnb_layers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE )
# convert param to the right dtype
snake_case_ = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
snake_case_ = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(_SCREAMING_SNAKE_CASE ):
param.to(_SCREAMING_SNAKE_CASE )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
f"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
"""We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
with init_empty_weights():
snake_case_ = replace_with_bnb_layers(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE )
snake_case_ = get_quantized_model_device_map(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , max_memory=_SCREAMING_SNAKE_CASE , no_split_module_classes=_SCREAMING_SNAKE_CASE , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
snake_case_ = True
snake_case_ = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=bnb_quantization_config.torch_dtype , offload_folder=_SCREAMING_SNAKE_CASE , offload_state_dict=_SCREAMING_SNAKE_CASE , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(_SCREAMING_SNAKE_CASE , device_map=_SCREAMING_SNAKE_CASE , offload_dir=_SCREAMING_SNAKE_CASE )
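# A hedged usage sketch for the quantization entry point defined above,
# assuming the public accelerate API names (BnbQuantizationConfig and
# load_and_quantize_model); the checkpoint id and weights folder below are
# placeholders, not values taken from this file.
from accelerate import init_empty_weights
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("facebook/opt-125m")  # any causal-LM checkpoint
with init_empty_weights():
    # Build the skeleton on the meta device, as the warning above recommends.
    empty_model = AutoModelForCausalLM.from_config(config)

bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
quantized_model = load_and_quantize_model(
    empty_model,
    bnb_quantization_config=bnb_config,
    weights_location="path/to/weight/folder",  # placeholder shard directory
    device_map="auto",
)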
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple:
if device_map is None:
if torch.cuda.is_available():
snake_case_ = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
snake_case_ = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
snake_case_ = {}
snake_case_ = special_dtypes
snake_case_ = no_split_module_classes
snake_case_ = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
snake_case_ = get_balanced_memory(
_SCREAMING_SNAKE_CASE , low_zero=(device_map == """balanced_low_0""") , max_memory=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
snake_case_ = max_memory
snake_case_ = infer_auto_device_map(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
# check if don't have any quantized module on the cpu
snake_case_ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
snake_case_ = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple:
if modules_to_not_convert is None:
snake_case_ = []
snake_case_ , snake_case_ = _replace_with_bnb_layers(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ) -> List[Any]:
snake_case_ = False
for name, module in model.named_children():
if current_key_name is None:
snake_case_ = []
current_key_name.append(_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
snake_case_ = """.""".join(_SCREAMING_SNAKE_CASE )
snake_case_ = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
snake_case_ = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
snake_case_ = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_SCREAMING_SNAKE_CASE , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
snake_case_ = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
snake_case_ = module.weight.data
if module.bias is not None:
snake_case_ = module.bias.data
bnb_module.requires_grad_(_SCREAMING_SNAKE_CASE )
setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = True
if len(list(module.children() ) ) > 0:
snake_case_ , snake_case_ = _replace_with_bnb_layers(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
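# The recursion pattern above, restated in plain PyTorch with readable names:
# walk named_children, swap matching leaf modules via setattr, and recurse into
# containers. nn.Identity stands in for the bitsandbytes linear layers here.
import torch.nn as nn

def replace_linears(module: nn.Module, skip=frozenset()) -> bool:
    replaced = False
    for name, child in module.named_children():
        if isinstance(child, nn.Linear) and name not in skip:
            setattr(module, name, nn.Identity())
            replaced = True
        else:
            replaced = replace_linears(child, skip) or replaced
    return replaced

demo = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Sequential(nn.Linear(4, 2)))
assert replace_linears(demo) is True
assert isinstance(demo[0], nn.Identity) and isinstance(demo[2][0], nn.Identity)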
def _a ( _SCREAMING_SNAKE_CASE ) -> Any:
# Create a copy of the model
with init_empty_weights():
snake_case_ = deepcopy(_SCREAMING_SNAKE_CASE ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
snake_case_ = find_tied_parameters(_SCREAMING_SNAKE_CASE )
# For compatibility with Accelerate < 0.18
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
snake_case_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
snake_case_ = sum(_SCREAMING_SNAKE_CASE , [] )
snake_case_ = len(_SCREAMING_SNAKE_CASE ) > 0
# Check if it is a base model
snake_case_ = False
if hasattr(_SCREAMING_SNAKE_CASE , """base_model_prefix""" ):
snake_case_ = not hasattr(_SCREAMING_SNAKE_CASE , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
snake_case_ = list(model.named_children() )
snake_case_ = [list_modules[-1][0]]
# add last module together with tied weights
snake_case_ = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE )
snake_case_ = list(set(_SCREAMING_SNAKE_CASE ) ) + list(_SCREAMING_SNAKE_CASE )
# remove ".weight" from the keys
snake_case_ = [""".weight""", """.bias"""]
snake_case_ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
snake_case_ = name.replace(_SCREAMING_SNAKE_CASE , """""" )
filtered_module_names.append(_SCREAMING_SNAKE_CASE )
return filtered_module_names
def _a ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
for m in model.modules():
if isinstance(_SCREAMING_SNAKE_CASE , bnb.nn.Linearabit ):
return True
return False
def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[int]:
return next(parameter.parameters() ).device
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 0 , dtype=_SCREAMING_SNAKE_CASE , value=_SCREAMING_SNAKE_CASE )
snake_case_ = param_name
snake_case_ = model
if "." in tensor_name:
snake_case_ = tensor_name.split(""".""" )
for split in splits[:-1]:
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if new_module is None:
raise ValueError(f"""{module} has no attribute {split}.""" )
snake_case_ = new_module
snake_case_ = splits[-1]
# offload weights
snake_case_ = False
offload_weight(module._parameters[tensor_name] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE )
if hasattr(module._parameters[tensor_name] , """SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE , )
else:
offload_weight(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE )
offload_weight(_SCREAMING_SNAKE_CASE , param_name.replace("""weight""" , """SCB""" ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE )
set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """meta""" , dtype=_SCREAMING_SNAKE_CASE , value=torch.empty(*param.size() ) )
| 347 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
class __A (snake_case__):
'''simple docstring'''
__lowercase: Union[str, Any] = ["""pixel_values"""]
def __init__( self : List[str] , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[int, float] = 1 / 255 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , **UpperCAmelCase_ : List[str] , ) ->None:
"""simple docstring"""
super().__init__(**UpperCAmelCase_ )
snake_case_ = size if size is not None else {"""shortest_edge""": 256}
snake_case_ = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_ )
snake_case_ = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
snake_case_ = get_size_dict(UpperCAmelCase_ , param_name="""crop_size""" )
snake_case_ = do_resize
snake_case_ = size
snake_case_ = resample
snake_case_ = do_center_crop
snake_case_ = crop_size
snake_case_ = do_rescale
snake_case_ = rescale_factor
snake_case_ = do_normalize
snake_case_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
snake_case_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[int] , ) ->np.ndarray:
"""simple docstring"""
snake_case_ = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_ )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
snake_case_ = get_resize_output_image_size(UpperCAmelCase_ , size=size["""shortest_edge"""] , default_to_square=UpperCAmelCase_ )
return resize(UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Any , ) ->np.ndarray:
"""simple docstring"""
snake_case_ = get_size_dict(UpperCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(UpperCAmelCase_ , size=(size["""height"""], size["""width"""]) , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def lowerCAmelCase ( self : int , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : float , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : List[str] ) ->np.ndarray:
"""simple docstring"""
return rescale(UpperCAmelCase_ , scale=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Union[str, Any] , ) ->np.ndarray:
"""simple docstring"""
return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[float] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , UpperCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCAmelCase_ : int , ) ->List[str]:
"""simple docstring"""
snake_case_ = do_resize if do_resize is not None else self.do_resize
snake_case_ = size if size is not None else self.size
snake_case_ = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_ )
snake_case_ = resample if resample is not None else self.resample
snake_case_ = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case_ = crop_size if crop_size is not None else self.crop_size
snake_case_ = get_size_dict(UpperCAmelCase_ , param_name="""crop_size""" )
snake_case_ = do_rescale if do_rescale is not None else self.do_rescale
snake_case_ = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case_ = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ = image_mean if image_mean is not None else self.image_mean
snake_case_ = image_std if image_std is not None else self.image_std
snake_case_ = make_list_of_images(UpperCAmelCase_ )
if not valid_images(UpperCAmelCase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
snake_case_ = [to_numpy_array(UpperCAmelCase_ ) for image in images]
if do_resize:
snake_case_ = [self.resize(image=UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ ) for image in images]
if do_center_crop:
snake_case_ = [self.center_crop(image=UpperCAmelCase_ , size=UpperCAmelCase_ ) for image in images]
if do_rescale:
snake_case_ = [self.rescale(image=UpperCAmelCase_ , scale=UpperCAmelCase_ ) for image in images]
if do_normalize:
snake_case_ = [self.normalize(image=UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ ) for image in images]
snake_case_ = [to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_ ) for image in images]
snake_case_ = {"""pixel_values""": images}
return BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_ )
def lowerCAmelCase ( self : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Tuple] = None ) ->Tuple:
"""simple docstring"""
snake_case_ = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCAmelCase_ ) != len(UpperCAmelCase_ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(UpperCAmelCase_ ):
snake_case_ = target_sizes.numpy()
snake_case_ = []
for idx in range(len(UpperCAmelCase_ ) ):
snake_case_ = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=UpperCAmelCase_ )
snake_case_ = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCAmelCase_ )
else:
snake_case_ = logits.argmax(dim=1 )
snake_case_ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
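# A numpy-only sketch of the resize -> center-crop -> rescale -> normalize
# pipeline the processor above applies. The constants (224 crop, 1/255 scale,
# 0.5 mean/std) mirror its defaults; the resize step is elided by assuming the
# input's shortest edge is already 256.
import numpy as np

def preprocess(image: np.ndarray) -> np.ndarray:
    h, w, _ = image.shape  # (H, W, 3) uint8, shortest edge assumed == 256
    top, left = (h - 224) // 2, (w - 224) // 2
    crop = image[top : top + 224, left : left + 224].astype(np.float32)
    crop = crop * (1 / 255)  # rescale
    mean = np.array([0.5, 0.5, 0.5])
    std = np.array([0.5, 0.5, 0.5])
    crop = (crop - mean) / std  # normalize
    return crop.transpose(2, 0, 1)  # HWC -> CHW, i.e. the "pixel_values" layout

out = preprocess(np.random.randint(0, 256, (256, 341, 3), dtype=np.uint8))
assert out.shape == (3, 224, 224)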
| 347 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Tuple = {
'microsoft/beit-base-patch16-224-pt22k': (
'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class __A (snake_case__):
'''simple docstring'''
__lowercase: Optional[int] = """beit"""
def __init__( self : List[str] , UpperCAmelCase_ : List[Any]=8_192 , UpperCAmelCase_ : Dict=768 , UpperCAmelCase_ : int=12 , UpperCAmelCase_ : Tuple=12 , UpperCAmelCase_ : List[Any]=3_072 , UpperCAmelCase_ : Tuple="gelu" , UpperCAmelCase_ : Dict=0.0 , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : Optional[Any]=1E-12 , UpperCAmelCase_ : int=224 , UpperCAmelCase_ : Tuple=16 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Dict=[3, 5, 7, 11] , UpperCAmelCase_ : Tuple=[1, 2, 3, 6] , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : List[Any]=0.4 , UpperCAmelCase_ : Optional[Any]=256 , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : Tuple=255 , **UpperCAmelCase_ : List[str] , ) ->Optional[Any]:
"""simple docstring"""
super().__init__(**UpperCAmelCase_ )
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = use_mask_token
snake_case_ = use_absolute_position_embeddings
snake_case_ = use_relative_position_bias
snake_case_ = use_shared_relative_position_bias
snake_case_ = layer_scale_init_value
snake_case_ = drop_path_rate
snake_case_ = use_mean_pooling
# decode head attributes (semantic segmentation)
snake_case_ = out_indices
snake_case_ = pool_scales
# auxiliary head attributes (semantic segmentation)
snake_case_ = use_auxiliary_head
snake_case_ = auxiliary_loss_weight
snake_case_ = auxiliary_channels
snake_case_ = auxiliary_num_convs
snake_case_ = auxiliary_concat_input
snake_case_ = semantic_loss_ignore_index
class __A (snake_case__):
'''simple docstring'''
__lowercase: List[Any] = version.parse("""1.11""")
@property
def lowerCAmelCase ( self : Dict ) ->Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase ( self : Any ) ->float:
"""simple docstring"""
return 1E-4
| 347 | 1 |
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
__SCREAMING_SNAKE_CASE : Tuple = 'docs/source/en/_toctree.yml'
def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
snake_case_ = defaultdict(_SCREAMING_SNAKE_CASE )
for doc in model_doc:
counts[doc["local"]] += 1
snake_case_ = [key for key, value in counts.items() if value > 1]
snake_case_ = []
for duplicate_key in duplicates:
snake_case_ = list({doc["""title"""] for doc in model_doc if doc["""local"""] == duplicate_key} )
if len(_SCREAMING_SNAKE_CASE ) > 1:
raise ValueError(
f"""{duplicate_key} is present several times in the documentation table of content at """
"""`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
"""others.""" )
# Only add this once
new_doc.append({"""local""": duplicate_key, """title""": titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc["""local"""]] == 1] )
# Sort
return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : s["title"].lower() )
def _a ( _SCREAMING_SNAKE_CASE=False ) -> Optional[Any]:
with open(_SCREAMING_SNAKE_CASE , encoding="""utf-8""" ) as f:
snake_case_ = yaml.safe_load(f.read() )
# Get to the API doc
snake_case_ = 0
while content[api_idx]["title"] != "API":
api_idx += 1
snake_case_ = content[api_idx]["""sections"""]
# Then to the model doc
snake_case_ = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
snake_case_ = api_doc[model_idx]["""sections"""]
snake_case_ = [(idx, section) for idx, section in enumerate(_SCREAMING_SNAKE_CASE ) if """sections""" in section]
snake_case_ = False
for idx, modality_doc in modalities_docs:
snake_case_ = modality_doc["""sections"""]
snake_case_ = clean_model_doc_toc(_SCREAMING_SNAKE_CASE )
if old_modality_doc != new_modality_doc:
snake_case_ = True
if overwrite:
snake_case_ = new_modality_doc
if diff:
if overwrite:
snake_case_ = model_doc
snake_case_ = api_doc
with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as f:
f.write(yaml.dump(_SCREAMING_SNAKE_CASE , allow_unicode=_SCREAMING_SNAKE_CASE ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
__SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
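# A tiny worked example of the de-duplication rule implemented above: entries
# sharing a "local" key collapse to one entry and the result sorts by title.
# This is a simplification; the real function also errors on conflicting titles.
docs = [
    {"local": "model_doc/bert", "title": "BERT"},
    {"local": "model_doc/albert", "title": "ALBERT"},
    {"local": "model_doc/bert", "title": "BERT"},
]
seen, deduped = set(), []
for doc in docs:
    if doc["local"] not in seen:
        seen.add(doc["local"])
        deduped.append(doc)
deduped.sort(key=lambda d: d["title"].lower())
assert [d["local"] for d in deduped] == ["model_doc/albert", "model_doc/bert"]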
| 347 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : List[Any] = {'vocab_file': 'spiece.model'}
__SCREAMING_SNAKE_CASE : int = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
}
}
# TODO(PVP) - this should be removed in Transformers v5
__SCREAMING_SNAKE_CASE : Dict = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
__SCREAMING_SNAKE_CASE : Optional[int] = '▁'
class __A (snake_case__):
'''simple docstring'''
__lowercase: Optional[int] = VOCAB_FILES_NAMES
__lowercase: Any = PRETRAINED_VOCAB_FILES_MAP
__lowercase: Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase: List[str] = ["""input_ids""", """attention_mask"""]
def __init__( self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any]="</s>" , UpperCAmelCase_ : Optional[Any]="<unk>" , UpperCAmelCase_ : Any="<pad>" , UpperCAmelCase_ : Tuple=100 , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , UpperCAmelCase_ : Optional[int]=True , **UpperCAmelCase_ : Dict , ) ->None:
"""simple docstring"""
if extra_ids > 0 and additional_special_tokens is None:
snake_case_ = [F"""<extra_id_{i}>""" for i in range(UpperCAmelCase_ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
snake_case_ = len(set(filter(lambda UpperCAmelCase_ : bool("""extra_id""" in str(UpperCAmelCase_ ) ) , UpperCAmelCase_ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
""" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
""" tokens""" )
if legacy:
logger.warning_once(
F"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"""
""" read the related pull request available at https://github.com/huggingface/transformers/pull/24565""" )
snake_case_ = legacy
snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , extra_ids=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , legacy=UpperCAmelCase_ , **UpperCAmelCase_ , )
snake_case_ = vocab_file
snake_case_ = extra_ids
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCAmelCase_ )
    @staticmethod
    def _eventually_correct_t5_max_length( pretrained_model_name_or_path , max_model_length , init_max_model_length ) ->Union[str, Any]:
        """simple docstring"""
        if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    """This tokenizer was incorrectly instantiated with a model max length of"""
                    F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
                    """ behavior is kept to avoid breaking backwards compatibility when padding/encoding with"""
                    """ `truncation is True`.\n- Be aware that you SHOULD NOT rely on"""
                    F""" {pretrained_model_name_or_path} automatically truncating your input to"""
                    F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
                    F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
                    """ `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"""
                    """ instantiate this tokenizer with `model_max_length` set to your preferred value.""" , FutureWarning , )
        return max_model_length
    @property
    def vocab_size( self ) ->int:
        """simple docstring"""
        return self.sp_model.get_piece_size() + self._extra_ids
    def get_vocab( self ) ->Dict[str, int]:
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) ->List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0 )) + [1]
        return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
    def get_sentinel_tokens( self ) ->List[str]:
        """simple docstring"""
        return list(
            set(filter(lambda token: bool(re.search(R"""<extra_id_\d+>""" , token ) ) is not None , self.additional_special_tokens ) ) )
    def get_sentinel_token_ids( self ) ->List[int]:
        """simple docstring"""
        return [self._convert_token_to_id(token ) for token in self.get_sentinel_tokens()]
    def _add_eos_if_not_present( self , token_ids: List[int] ) ->List[int]:
        """simple docstring"""
        if len(token_ids ) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                F"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"""
                """ eos tokens being added.""" )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) ->List[int]:
        """simple docstring"""
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos ) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos ) * [0]
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) ->List[int]:
        """simple docstring"""
        token_ids_0 = self._add_eos_if_not_present(token_ids_0 )
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1 )
            return token_ids_0 + token_ids_1
    def __getstate__( self ) ->Dict:
        """simple docstring"""
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state
    def __setstate__( self , d ) ->None:
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def tokenize( self , text: "TextInput" , **kwargs ) ->List[str]:
        """simple docstring"""
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE , """ """ )
        return super().tokenize(text , **kwargs )
    def _tokenize( self , text , **kwargs ) ->List[str]:
        """simple docstring"""
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE )
            if is_first:
                text = text[1:]
        tokens = self.sp_model.encode(text , out_type=str )
        if not self.legacy and not is_first and not text.startswith(""" """ ) and tokens[0].startswith(SPIECE_UNDERLINE ):
            tokens = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
        return tokens
    def _convert_token_to_id( self , token ) ->int:
        """simple docstring"""
        if token.startswith("""<extra_id_""" ):
            match = re.match(R"""<extra_id_(\d+)>""" , token )
            num = int(match.group(1 ) )
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token )
    def _convert_id_to_token( self , index ) ->str:
        """simple docstring"""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index )
        else:
            token = F"""<extra_id_{self.vocab_size - 1 - index}>"""
        return token
    def convert_tokens_to_string( self , tokens ) ->str:
        """simple docstring"""
        current_sub_tokens = []
        out_string = """"""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) ->Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
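# A minimal standalone sketch (added; not part of the original tokenizer) of the
# sentinel-id arithmetic in _convert_token_to_id/_convert_id_to_token above:
# <extra_id_N> maps to vocab_size - N - 1, so <extra_id_0> takes the highest id.
# The vocab size of 32_100 is an assumption (32_000 SentencePiece pieces plus
# 100 extra ids), not read from a real spiece.model.
def _sentinel_round_trip(n: int, vocab_size: int = 32_100) -> bool:
    token_id = vocab_size - n - 1
    recovered = vocab_size - 1 - token_id
    return recovered == n

assert all(_sentinel_round_trip(n) for n in range(100))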
| 347 | 1 |
"""simple docstring"""
import random
class Onepad:
    '''simple docstring'''
    @staticmethod
    def encrypt(text: str ) ->tuple[list[int], list[int]]:
        """simple docstring"""
        plain = [ord(i ) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1 , 300 )
            encrypted = (i + k) * k
            cipher.append(encrypted )
            key.append(k )
        return cipher, key
    @staticmethod
    def decrypt(cipher: list[int] , key: list[int] ) ->str:
        """simple docstring"""
        plain = []
        for i in range(len(key ) ):
            p = int((cipher[i] - (key[i]) ** 2) / key[i] )
            plain.append(chr(p ) )
        return "".join(plain )
if __name__ == "__main__":
    c, k = Onepad().encrypt('Hello')
print(c, k)
print(Onepad().decrypt(c, k))
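# Added round-trip sketch (not in the original script): encryption computes
# c = (p + k) * k for a random key digit k, so (c - k**2) // k recovers p exactly.
_sample = 'one-time pad'
_cipher, _key = Onepad.encrypt(_sample)
assert all((c - k * k) // k == ord(ch) for c, k, ch in zip(_cipher, _key, _sample))
assert Onepad.decrypt(_cipher, _key) == _sample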
| 347 |
"""simple docstring"""
def solution(limit: int = 1_000_000 ) -> int:
    phi = [i - 1 for i in range(limit + 1 )]
    for i in range(2 , limit + 1 ):
        if phi[i] == i - 1:
            for j in range(2 * i , limit + 1 , i ):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
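# Added cross-check (not part of the original solution): for a small limit the
# totient sieve above must agree with a brute-force count of reduced proper
# fractions n/d with gcd(n, d) == 1 and n < d <= limit.
from math import gcd

assert solution(100) == sum(1 for d in range(2, 101) for n in range(1, d) if gcd(n, d) == 1)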
| 347 | 1 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
'''simple docstring'''
def __init__( self : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : str=False , UpperCAmelCase_ : List[Any]=10 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : Union[str, Any]=32 * 8 , UpperCAmelCase_ : Any=32 * 8 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : Tuple=64 , ) ->int:
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = is_training
snake_case_ = use_auxiliary_loss
snake_case_ = num_queries
snake_case_ = num_channels
snake_case_ = min_size
snake_case_ = max_size
snake_case_ = num_labels
snake_case_ = hidden_dim
snake_case_ = hidden_dim
def lowerCAmelCase ( self : Dict ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
UpperCAmelCase_ )
snake_case_ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=UpperCAmelCase_ )
snake_case_ = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=UpperCAmelCase_ ) > 0.5
).float()
snake_case_ = (torch.rand((self.batch_size, self.num_labels) , device=UpperCAmelCase_ ) > 0.5).long()
snake_case_ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCAmelCase ( self : str ) ->Dict:
"""simple docstring"""
snake_case_ = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
snake_case_ = self.num_queries
snake_case_ = self.num_labels
snake_case_ = [1, 1, 1, 1]
snake_case_ = self.num_channels
snake_case_ = 64
snake_case_ = 128
snake_case_ = self.hidden_dim
snake_case_ = self.hidden_dim
snake_case_ = self.hidden_dim
return config
def lowerCAmelCase ( self : List[Any] ) ->str:
"""simple docstring"""
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.prepare_config_and_inputs()
snake_case_ = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : List[str] ) ->Optional[int]:
"""simple docstring"""
snake_case_ = output.encoder_hidden_states
snake_case_ = output.pixel_decoder_hidden_states
snake_case_ = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(UpperCAmelCase_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(UpperCAmelCase_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(UpperCAmelCase_ ) , config.decoder_layers )
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str=False ) ->List[Any]:
"""simple docstring"""
with torch.no_grad():
snake_case_ = MaskaFormerModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(pixel_values=UpperCAmelCase_ , pixel_mask=UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] ) ->List[str]:
"""simple docstring"""
snake_case_ = MaskaFormerForUniversalSegmentation(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
def comm_check_on_output(UpperCAmelCase_ : List[Any] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
snake_case_ = model(pixel_values=UpperCAmelCase_ , pixel_mask=UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ )
comm_check_on_output(UpperCAmelCase_ )
snake_case_ = model(
pixel_values=UpperCAmelCase_ , pixel_mask=UpperCAmelCase_ , mask_labels=UpperCAmelCase_ , class_labels=UpperCAmelCase_ )
comm_check_on_output(UpperCAmelCase_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskaFormerModelTest (ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {}
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp( self ) ->None:
        """simple docstring"""
        self.model_tester = MaskaFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaskaFormerConfig , has_text_modality=False )
def lowerCAmelCase ( self : int ) ->Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : List[Any] ) ->str:
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(UpperCAmelCase_ , **UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ )
def lowerCAmelCase ( self : Dict ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*UpperCAmelCase_ )
@unittest.skip(reason="""Mask2Former does not use inputs_embeds""" )
def lowerCAmelCase ( self : str ) ->Any:
"""simple docstring"""
pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" )
def lowerCAmelCase ( self : Tuple ) ->Dict:
"""simple docstring"""
pass
@unittest.skip(reason="""Mask2Former is not a generative model""" )
def lowerCAmelCase ( self : List[str] ) ->str:
"""simple docstring"""
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""" )
def lowerCAmelCase ( self : List[Any] ) ->List[str]:
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def lowerCAmelCase ( self : Optional[int] ) ->Dict:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase ( self : Tuple ) ->Optional[int]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Optional[Any] ) ->Optional[int]:
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(UpperCAmelCase_ )
snake_case_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCAmelCase_ )
@slow
def lowerCAmelCase ( self : Union[str, Any] ) ->Any:
"""simple docstring"""
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
snake_case_ = MaskaFormerModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
def lowerCAmelCase ( self : List[Any] ) ->Dict:
"""simple docstring"""
snake_case_ = (self.model_tester.min_size,) * 2
snake_case_ = {
"""pixel_values""": torch.randn((2, 3, *size) , device=UpperCAmelCase_ ),
"""mask_labels""": torch.randn((2, 10, *size) , device=UpperCAmelCase_ ),
"""class_labels""": torch.zeros(2 , 10 , device=UpperCAmelCase_ ).long(),
}
snake_case_ = self.model_tester.get_config()
snake_case_ = MaskaFormerForUniversalSegmentation(UpperCAmelCase_ ).to(UpperCAmelCase_ )
snake_case_ = model(**UpperCAmelCase_ )
self.assertTrue(outputs.loss is not None )
def lowerCAmelCase ( self : Dict ) ->Optional[int]:
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(UpperCAmelCase_ , **UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ )
def lowerCAmelCase ( self : Union[str, Any] ) ->List[str]:
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(UpperCAmelCase_ ).to(UpperCAmelCase_ )
snake_case_ = model(**UpperCAmelCase_ , output_attentions=UpperCAmelCase_ )
self.assertTrue(outputs.attentions is not None )
def lowerCAmelCase ( self : Optional[int] ) ->Optional[int]:
"""simple docstring"""
if not self.model_tester.is_training:
return
snake_case_ = self.all_model_classes[1]
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs()
snake_case_ = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.train()
snake_case_ = model(UpperCAmelCase_ , mask_labels=UpperCAmelCase_ , class_labels=UpperCAmelCase_ ).loss
loss.backward()
def lowerCAmelCase ( self : Optional[int] ) ->str:
"""simple docstring"""
snake_case_ = self.all_model_classes[1]
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs()
snake_case_ = True
snake_case_ = True
snake_case_ = model_class(UpperCAmelCase_ ).to(UpperCAmelCase_ )
model.train()
snake_case_ = model(UpperCAmelCase_ , mask_labels=UpperCAmelCase_ , class_labels=UpperCAmelCase_ )
snake_case_ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
snake_case_ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
snake_case_ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
snake_case_ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=UpperCAmelCase_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__SCREAMING_SNAKE_CASE : Any = 1E-4
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class __A (unittest.TestCase):
'''simple docstring'''
@cached_property
def lowerCAmelCase ( self : int ) ->Dict:
"""simple docstring"""
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowerCAmelCase ( self : Any ) ->int:
"""simple docstring"""
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowerCAmelCase ( self : int ) ->str:
"""simple docstring"""
snake_case_ = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(UpperCAmelCase_ )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(UpperCAmelCase_ , return_tensors="""pt""" ).to(UpperCAmelCase_ )
snake_case_ = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCAmelCase_ , (1, 3, 384, 384) )
with torch.no_grad():
snake_case_ = model(**UpperCAmelCase_ )
snake_case_ = torch.tensor(
[[-0.2_790, -1.0_717, -1.1_668], [-0.5_128, -0.3_128, -0.4_987], [-0.5_832, 0.1_971, -0.0_197]] ).to(UpperCAmelCase_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_ ) )
snake_case_ = torch.tensor(
[[0.8_973, 1.1_847, 1.1_776], [1.1_934, 1.5_040, 1.5_128], [1.1_153, 1.4_486, 1.4_951]] ).to(UpperCAmelCase_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_ ) )
snake_case_ = torch.tensor(
[[2.1_152, 1.7_000, -0.8_603], [1.5_808, 1.8_004, -0.9_353], [1.6_043, 1.7_495, -0.5_999]] ).to(UpperCAmelCase_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_ ) )
def lowerCAmelCase ( self : Any ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(UpperCAmelCase_ ).eval()
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(UpperCAmelCase_ , return_tensors="""pt""" ).to(UpperCAmelCase_ )
snake_case_ = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCAmelCase_ , (1, 3, 384, 384) )
with torch.no_grad():
snake_case_ = model(**UpperCAmelCase_ )
# masks_queries_logits
snake_case_ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
snake_case_ = [
[-8.7_839, -9.0_056, -8.8_121],
[-7.4_104, -7.0_313, -6.5_401],
[-6.6_105, -6.3_427, -6.4_675],
]
snake_case_ = torch.tensor(UpperCAmelCase_ ).to(UpperCAmelCase_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_ ) )
# class_queries_logits
snake_case_ = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
snake_case_ = torch.tensor(
[
[1.8_324, -8.0_835, -4.1_922],
[0.8_450, -9.0_050, -3.6_053],
[0.3_045, -7.7_293, -3.0_275],
] ).to(UpperCAmelCase_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_ ) )
def lowerCAmelCase ( self : str ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(UpperCAmelCase_ ).eval()
snake_case_ = self.default_image_processor
snake_case_ = image_processor(
            [np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.float32 ), np.zeros((384, 384) ).astype(np.float32 )] , return_tensors="""pt""" , )
snake_case_ = inputs["""pixel_values"""].to(UpperCAmelCase_ )
snake_case_ = [el.to(UpperCAmelCase_ ) for el in inputs["""mask_labels"""]]
snake_case_ = [el.to(UpperCAmelCase_ ) for el in inputs["""class_labels"""]]
with torch.no_grad():
snake_case_ = model(**UpperCAmelCase_ )
self.assertTrue(outputs.loss is not None )
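# Added standalone sketch (numpy only; not part of the test suite) of the label
# shapes the tester above feeds the model: batch 2, 10 classes, 256x256 masks.
# The sizes mirror the tester defaults (32 * 8) and are illustrative only.
import numpy as _np

_mask_labels = (_np.random.rand(2, 10, 256, 256) > 0.5).astype(_np.float32)
_class_labels = _np.zeros((2, 10), dtype=_np.int64)
assert _mask_labels.shape == (2, 10, 256, 256) and _class_labels.shape == (2, 10)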
| 347 |
"""simple docstring"""
from __future__ import annotations
def print_distance(distance: list[float] , src ) -> None:
    print(f"""Vertex\tShortest Distance from vertex {src}""" )
    for i, d in enumerate(distance ):
        print(f"""{i}\t\t{d}""" )
def check_negative_cycle(graph: list[dict[str, int]] , distance: list[float] , edge_count: int ) -> bool:
    for j in range(edge_count ):
        u , v , w = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
        if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford(graph: list[dict[str, int]] , vertex_count: int , edge_count: int , src: int ) -> list[float]:
    distance = [float("""inf""" )] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1 ):
        for j in range(edge_count ):
            u , v , w = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
            if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph , distance , edge_count )
    if negative_cycle_exists:
        raise Exception("""Negative cycle found""" )
    return distance
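# Added worked example (not part of the original interactive flow): a fixed
# 4-vertex graph with one negative edge but no negative cycle.
_example_edges = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 0, "dst": 2, "weight": 5},
    {"src": 1, "dst": 2, "weight": -3},
    {"src": 2, "dst": 3, "weight": 2},
]
assert bellman_ford(_example_edges, 4, len(_example_edges), 0) == [0.0, 4.0, 1.0, 3.0]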
if __name__ == "__main__":
import doctest
doctest.testmod()
    V = int(input('Enter number of vertices: ').strip())
    E = int(input('Enter number of edges: ').strip())
    graph: list[dict[str, int]] = [{} for _ in range(E)]
    for i in range(E):
        print('Edge ', i + 1)
        src, dest, weight = (
            int(x)
            for x in input('Enter source, destination, weight: ').strip().split(' ')
        )
        graph[i] = {'src': src, 'dst': dest, 'weight': weight}
    source = int(input('\nEnter shortest path source:').strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 347 | 1 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
__SCREAMING_SNAKE_CASE : Any = get_tests_dir('fixtures/test_sentencepiece.model')
__SCREAMING_SNAKE_CASE : str = {'target_lang': 'fi', 'source_lang': 'en'}
__SCREAMING_SNAKE_CASE : List[str] = '>>zh<<'
ORG_NAME = 'Helsinki-NLP/'
if is_torch_available():
__SCREAMING_SNAKE_CASE : Optional[int] = 'pt'
elif is_tf_available():
__SCREAMING_SNAKE_CASE : Dict = 'tf'
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = 'jax'
@require_sentencepiece
class MarianTokenizationTest (TokenizerTesterMixin , unittest.TestCase):
    '''simple docstring'''
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
def lowerCAmelCase ( self : Any ) ->Union[str, Any]:
"""simple docstring"""
super().setUp()
snake_case_ = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
snake_case_ = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
snake_case_ = Path(self.tmpdirname )
save_json(UpperCAmelCase_ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
save_json(UpperCAmelCase_ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(UpperCAmelCase_ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
copyfile(UpperCAmelCase_ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
snake_case_ = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : Optional[int] , **UpperCAmelCase_ : str ) ->MarianTokenizer:
"""simple docstring"""
return MarianTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : int ) ->List[str]:
"""simple docstring"""
return (
"This is a test",
"This is a test",
)
def lowerCAmelCase ( self : int ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = """</s>"""
snake_case_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) , UpperCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) , UpperCAmelCase_ )
def lowerCAmelCase ( self : List[Any] ) ->Tuple:
"""simple docstring"""
snake_case_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(UpperCAmelCase_ ) , 9 )
def lowerCAmelCase ( self : Union[str, Any] ) ->Dict:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
snake_case_ = MarianTokenizer.from_pretrained(F"""{ORG_NAME}opus-mt-en-de""" )
snake_case_ = en_de_tokenizer(["""I am a small frog"""] , return_tensors=UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = [38, 121, 14, 697, 38_848, 0]
self.assertListEqual(UpperCAmelCase_ , batch.input_ids[0] )
snake_case_ = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(UpperCAmelCase_ )
snake_case_ = [x.name for x in Path(UpperCAmelCase_ ).glob("""*""" )]
self.assertIn("""source.spm""" , UpperCAmelCase_ )
MarianTokenizer.from_pretrained(UpperCAmelCase_ )
def lowerCAmelCase ( self : Union[str, Any] ) ->str:
"""simple docstring"""
snake_case_ = self.get_tokenizer()
snake_case_ = tok(
["""I am a small frog""" * 1_000, """I am a small frog"""] , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(batch.input_ids.shape , (2, 512) )
def lowerCAmelCase ( self : List[str] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = self.get_tokenizer()
snake_case_ = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def lowerCAmelCase ( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
snake_case_ = {"""input_ids""": [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
def lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
snake_case_ = """Tämä on testi"""
snake_case_ = """This is a test"""
snake_case_ = [76, 7, 2_047, 2]
snake_case_ = [69, 12, 11, 940, 2]
snake_case_ = tokenizer(UpperCAmelCase_ ).input_ids
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = tokenizer(text_target=UpperCAmelCase_ ).input_ids
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = tokenizer.decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
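# Added standalone sketch of the tiny vocab written in setUp above: Marian
# checkpoints store the vocab as a plain token -> id JSON mapping, which is
# essentially what the save_json helper writes (shown with the stdlib here to
# stay self-contained).
import json as _json, os as _os, tempfile as _tempfile

_vocab = {"</s>": 0, "<unk>": 1, "▁This": 2, "<pad>": 8}
with _tempfile.TemporaryDirectory() as _tmp:
    _path = _os.path.join(_tmp, "vocab.json")
    with open(_path, "w", encoding="utf-8") as _f:
        _json.dump(_vocab, _f)
    with open(_path, encoding="utf-8") as _f:
        assert _json.load(_f) == _vocab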
| 347 |
"""simple docstring"""
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" )
    parser.add_argument(
        """--pretrained_model_config""" , type=str , default="""roberta-base""" , help="""The model config to use. Note that we don't copy the model's weights, only the config!""" , )
    parser.add_argument(
        """--tokenizer""" , type=str , default="""unigram-tokenizer-wikitext""" , help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""" , )
    parser.add_argument(
        """--per_replica_batch_size""" , type=int , default=8 , help="""Batch size per TPU core.""" , )
    parser.add_argument(
        """--no_tpu""" , action="""store_true""" , help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""" , )
    parser.add_argument(
        """--tpu_name""" , type=str , help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""" , default="""local""" , )
    parser.add_argument(
        """--tpu_zone""" , type=str , help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" , )
    parser.add_argument(
        """--gcp_project""" , type=str , help="""Google cloud project name. Only used for non-Colab TPU nodes.""" )
    parser.add_argument(
        """--bfloat16""" , action="""store_true""" , help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""" , )
    parser.add_argument(
        """--train_dataset""" , type=str , help="""Path to training dataset to load. If the path begins with `gs://`"""
        """ then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
    parser.add_argument(
        """--shuffle_buffer_size""" , type=int , default=2**18 , help="""Size of the shuffle buffer (in samples)""" , )
    parser.add_argument(
        """--eval_dataset""" , type=str , help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
        """ then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
    parser.add_argument(
        """--num_epochs""" , type=int , default=1 , help="""Number of epochs to train for.""" , )
    parser.add_argument(
        """--learning_rate""" , type=float , default=1E-4 , help="""Learning rate to use for training.""" , )
    parser.add_argument(
        """--weight_decay_rate""" , type=float , default=1E-3 , help="""Weight decay rate to use for training.""" , )
    parser.add_argument(
        """--max_length""" , type=int , default=512 , help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""" , )
    parser.add_argument(
        """--mlm_probability""" , type=float , default=0.15 , help="""Fraction of tokens to mask during training.""" , )
    parser.add_argument("""--output_dir""" , type=str , required=True , help="""Path to save model checkpoints to.""" )
    parser.add_argument("""--hub_model_id""" , type=str , help="""Model ID to upload to on the Hugging Face Hub.""" )
    args = parser.parse_args()
    return args
def initialize_tpu(args ):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            """Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """
            """--gcp_project. When running on a TPU VM, use --tpu_name local.""" )
    tf.config.experimental_connect_to_cluster(tpu )
    tf.tpu.experimental.initialize_tpu_system(tpu )
    return tpu
def count_samples(file_list ):
    num_samples = 0
    for file in file_list:
        filename = file.split("""/""" )[-1]
        sample_count = re.search(r"""-\d+-(\d+)\.tfrecord""" , filename ).group(1 )
        sample_count = int(sample_count )
        num_samples += sample_count
    return num_samples
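# Added self-check (not in the original script): count_samples relies on shard
# names ending in "-<shard>-<num_samples>.tfrecord"; the file names below are
# made up purely to illustrate the convention.
assert count_samples(["gs://bucket/train-0-1000.tfrecord", "train-1-500.tfrecord"]) == 1_500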
def prepare_dataset(records , decode_fn , mask_fn , batch_size , shuffle , shuffle_buffer_size=None ):
    num_samples = count_samples(records )
    dataset = tf.data.Dataset.from_tensor_slices(records )
    if shuffle:
        dataset = dataset.shuffle(len(dataset ) )
    dataset = tf.data.TFRecordDataset(dataset , num_parallel_reads=AUTO )
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples ) )
    dataset = dataset.map(decode_fn , num_parallel_calls=AUTO )
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(args.shuffle_buffer_size )
    dataset = dataset.batch(batch_size , drop_remainder=True )
    dataset = dataset.map(mask_fn , num_parallel_calls=AUTO )
    dataset = dataset.prefetch(AUTO )
    return dataset
def main(args ):
    if not args.no_tpu:
        tpu = initialize_tpu(args )
        strategy = tf.distribute.TPUStrategy(tpu )
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" )
    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer )
    config = AutoConfig.from_pretrained(args.pretrained_model_config )
    config.vocab_size = tokenizer.vocab_size
    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) )
    if not training_records:
        raise ValueError(f"""No .tfrecord files found in {args.train_dataset}.""" )
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) )
    if not eval_records:
        raise ValueError(f"""No .tfrecord files found in {args.eval_dataset}.""" )
    num_train_samples = count_samples(training_records )
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs
    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config )
        model(model.dummy_inputs )  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer , schedule = create_optimizer(
            num_train_steps=total_train_steps , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer , metrics=["""accuracy"""] )
    def decode_fn(example ):
        features = {
            """input_ids""": tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
            """attention_mask""": tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
        }
        return tf.io.parse_single_example(example , features )
    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer , mlm_probability=args.mlm_probability , mlm=True , return_tensors="""tf""" )
    def mask_with_collator(batch ):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["""attention_mask"""] , tf.bool )
            | (batch["""input_ids"""] == tokenizer.cls_token_id)
            | (batch["""input_ids"""] == tokenizer.sep_token_id)
        )
        batch["""input_ids"""] , batch["""labels"""] = data_collator.tf_mask_tokens(
            batch["""input_ids"""] , vocab_size=len(tokenizer ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=special_tokens_mask , )
        return batch
    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset = prepare_dataset(
        training_records , decode_fn=decode_fn , mask_fn=mask_with_collator , batch_size=batch_size , shuffle=True , shuffle_buffer_size=args.shuffle_buffer_size , )
    eval_dataset = prepare_dataset(
        eval_records , decode_fn=decode_fn , mask_fn=mask_with_collator , batch_size=batch_size , shuffle=False , )
    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=tokenizer ) )
    model.fit(
        train_dataset , validation_data=eval_dataset , epochs=args.num_epochs , callbacks=callbacks , )
    model.save_pretrained(args.output_dir )
if __name__ == "__main__":
    args = parse_args()
main(args)
| 347 | 1 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k',
'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v',
'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q',
'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u',
'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v',
'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out',
'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos',
'self_attn.rotary_emb': 'encoder.embed_positions',
'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm',
'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1',
'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2',
'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv',
'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm',
'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm',
'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense',
'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense',
'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm',
'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense',
'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense',
'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}""" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def recursively_load_weights(fairseq_model , hf_model , is_headless ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == """group""" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split(""".""" )[-2]
                        mapped_key = mapped_key.replace("""*""" , layer_index )
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = """weight"""
                    elif "running_mean" in name:
                        weight_type = """running_mean"""
                    elif "inv_freq" in name:
                        weight_type = """inv_freq"""
                    elif "running_var" in name:
                        weight_type = """running_var"""
                    elif "num_batches_tracked" in name:
                        weight_type = """num_batches_tracked"""
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f"""Unused weights: {unused_weights}""" )
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split("""conv_layers.""" )[-1]
    items = name.split(""".""" )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    if config_path is not None:
        config = WavaVecaConformerConfig.from_pretrained(config_path , hidden_act="""swish""" )
    else:
        config = WavaVecaConformerConfig()
    if "rope" in checkpoint_path:
        config.position_embeddings_type = """rotary"""
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , """vocab.json""" )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["""<pad>"""] = 0
            vocab_dict["""<s>"""] = 1
            with open(vocab_path , """w""" , encoding="""utf-8""" ) as vocab_handle:
                json.dump(vocab_dict , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == """layer""" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_wavavec = WavaVecaConformerForCTC(config )
    else:
        hf_wavavec = WavaVecaConformerForPreTraining(config )
    if is_finetuned:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
    else:
        task_arg = argparse.Namespace(task="""audio_pretraining""" )
        task = fairseq.tasks.setup_task(task_arg )
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=task )
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec , not is_finetuned )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
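# Added sketch (standalone, using the MAPPING table above) of the "*"
# substitution in recursively_load_weights: the layer index is recovered from
# the fairseq parameter name and spliced into the HF key. The example name is
# made up for illustration.
_name = "encoder.layers.3.self_attn.linear_q.weight"
_key = "self_attn.linear_q"
_mapped = "wav2vec2_conformer." + MAPPING[_key]
_mapped = _mapped.replace("*", _name.split(_key)[0].split(".")[-2])
assert _mapped == "wav2vec2_conformer.encoder.layers.3.self_attn.linear_q"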
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 347 |
"""simple docstring"""
def speed_of_sound_in_a_fluid(density: float , bulk_modulus: float ) -> float:
if density <= 0:
raise ValueError("""Impossible fluid density""" )
if bulk_modulus <= 0:
raise ValueError("""Impossible bulk modulus""" )
return (bulk_modulus / density) ** 0.5
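# Added worked example: for water near room temperature (density ~998 kg/m^3,
# bulk modulus ~2.15e9 Pa, both rounded textbook values) the formula gives
# roughly 1.47 km/s.
assert round(speed_of_sound_in_a_fluid(998, 2.15e9)) == 1468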
if __name__ == "__main__":
import doctest
doctest.testmod()
| 347 | 1 |
"""simple docstring"""
from math import factorial
def binomial_distribution(successes: int , trials: int , prob: float ) -> float:
    if successes > trials:
        raise ValueError("""successes must be lower or equal to trials""" )
    if trials < 0 or successes < 0:
        raise ValueError("""the function is defined for non-negative integers""" )
    if not isinstance(successes , int ) or not isinstance(trials , int ):
        raise ValueError("""the function is defined for non-negative integers""" )
    if not 0 < prob < 1:
        raise ValueError("""prob has to be in range of 1 - 0""" )
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials ) )
    coefficient /= factorial(successes ) * factorial(trials - successes )
return probability * coefficient
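# Added sanity check: over all possible success counts the probabilities of a
# binomial distribution sum to 1.
assert abs(sum(binomial_distribution(k, 4, 0.75) for k in range(5)) - 1.0) < 1e-9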
if __name__ == "__main__":
from doctest import testmod
testmod()
print('Probability of 2 successes out of 4 trails')
print('with probability of 0.75 is:', end=' ')
print(binomial_distribution(2, 4, 0.75))
| 347 |
"""simple docstring"""
def is_palindrome(num: int ) -> bool:
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
return num_copy == rev_num
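# Added quick checks of the digit-reversal logic above.
assert is_palindrome(121) and is_palindrome(0) and not is_palindrome(10)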
if __name__ == "__main__":
import doctest
doctest.testmod()
| 347 | 1 |
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
__SCREAMING_SNAKE_CASE : List[str] = True
except ImportError:
__SCREAMING_SNAKE_CASE : int = False
try:
from torch.hub import _get_torch_home
    torch_cache_home = _get_torch_home()
except ImportError:
__SCREAMING_SNAKE_CASE : Tuple = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))
)
__SCREAMING_SNAKE_CASE : List[Any] = os.path.join(torch_cache_home, 'transformers')
__SCREAMING_SNAKE_CASE : Tuple = 'https://cdn.huggingface.co'
__SCREAMING_SNAKE_CASE : Optional[Any] = 'https://s3.amazonaws.com/models.huggingface.co/bert'
__SCREAMING_SNAKE_CASE : List[str] = '/'.join(str(Path(__file__).resolve()).split('/')[:-1])
__SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(PATH, 'config.yaml')
__SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(PATH, 'attributes.txt')
__SCREAMING_SNAKE_CASE : str = os.path.join(PATH, 'objects.txt')
__SCREAMING_SNAKE_CASE : str = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)
__SCREAMING_SNAKE_CASE : int = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE)
__SCREAMING_SNAKE_CASE : Tuple = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE)
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'pytorch_model.bin'
__SCREAMING_SNAKE_CASE : Optional[int] = 'config.yaml'
def _a ( objs=OBJECTS , attrs=ATTRIBUTES ) -> List[str]:
    # Build the object-class and attribute vocabularies; each line is comma-separated and only the first field is kept.
    vg_classes = []
    with open(objs ) as f:
        for object in f.readlines():
            vg_classes.append(object.split(""",""" )[0].lower().strip() )
    vg_attrs = []
    with open(attrs ) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(""",""" )[0].lower().strip() )
return vg_classes, vg_attrs
def _a ( _SCREAMING_SNAKE_CASE ) -> List[Any]:
    r = OrderedDict()
    with open(_SCREAMING_SNAKE_CASE , """rb""" ) as f:
        ckp = pkl.load(f )["""model"""]
    for k in copy.deepcopy(list(ckp.keys() ) ):
        v = ckp.pop(k )
        if isinstance(v , np.ndarray ):
            v = torch.tensor(v )
        else:
            assert isinstance(v , torch.Tensor ), type(v )
        r[k] = v
return r
class __A :
'''simple docstring'''
__lowercase: Optional[Any] = {}
def __init__( self : Dict , UpperCAmelCase_ : dict , UpperCAmelCase_ : str = "root" , UpperCAmelCase_ : Tuple=0 ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = name
snake_case_ = level
snake_case_ = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
snake_case_ = copy.deepcopy(UpperCAmelCase_ )
snake_case_ = copy.deepcopy(UpperCAmelCase_ )
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = Config(UpperCAmelCase_ , name=UpperCAmelCase_ , level=level + 1 )
snake_case_ = v
setattr(self , UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = d
def __repr__( self : Dict ) ->int:
"""simple docstring"""
return str(list((self._pointer.keys()) ) )
def __setattr__( self : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : str ) ->Tuple:
"""simple docstring"""
snake_case_ = val
snake_case_ = val
snake_case_ = key.split(""".""" )
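        # Dotted keys like "model.encoder.layer" descend through nested Config objects, level by level.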
snake_case_ = len(UpperCAmelCase_ ) - 1
snake_case_ = self._pointer
if len(UpperCAmelCase_ ) > 1:
for i, l in enumerate(UpperCAmelCase_ ):
if hasattr(self , UpperCAmelCase_ ) and isinstance(getattr(self , UpperCAmelCase_ ) , UpperCAmelCase_ ):
setattr(getattr(self , UpperCAmelCase_ ) , """.""".join(levels[i:] ) , UpperCAmelCase_ )
if l == last_level:
snake_case_ = val
else:
snake_case_ = pointer[l]
def lowerCAmelCase ( self : List[Any] ) ->Optional[Any]:
"""simple docstring"""
return self._pointer
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any ) ->Dict:
"""simple docstring"""
with open(F"""{file_name}""" , """w""" ) as stream:
dump(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int ) ->Tuple:
"""simple docstring"""
with open(F"""{file_name}""" , """w""" ) as stream:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
@staticmethod
def lowerCAmelCase ( UpperCAmelCase_ : Optional[Any] ) ->Any:
"""simple docstring"""
with open(UpperCAmelCase_ ) as stream:
snake_case_ = load(UpperCAmelCase_ , Loader=UpperCAmelCase_ )
return data
def __str__( self : List[str] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = """ """
if self._name != "root":
snake_case_ = F"""{t * (self._level-1)}{self._name}:\n"""
else:
snake_case_ = """"""
snake_case_ = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
r += F"""{t * (self._level)}{v}\n"""
self._level += 1
else:
r += F"""{t * (self._level)}{k}: {v} ({type(UpperCAmelCase_ ).__name__})\n"""
snake_case_ = level
return r[:-1]
@classmethod
def lowerCAmelCase ( cls : List[Any] , UpperCAmelCase_ : str , **UpperCAmelCase_ : List[Any] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ , snake_case_ = cls.get_config_dict(UpperCAmelCase_ , **UpperCAmelCase_ )
return cls(UpperCAmelCase_ )
@classmethod
def lowerCAmelCase ( cls : Any , UpperCAmelCase_ : str , **UpperCAmelCase_ : Union[str, Any] ) ->Any:
"""simple docstring"""
snake_case_ = kwargs.pop("""cache_dir""" , UpperCAmelCase_ )
snake_case_ = kwargs.pop("""force_download""" , UpperCAmelCase_ )
snake_case_ = kwargs.pop("""resume_download""" , UpperCAmelCase_ )
snake_case_ = kwargs.pop("""proxies""" , UpperCAmelCase_ )
snake_case_ = kwargs.pop("""local_files_only""" , UpperCAmelCase_ )
if os.path.isdir(UpperCAmelCase_ ):
snake_case_ = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ )
elif os.path.isfile(UpperCAmelCase_ ) or is_remote_url(UpperCAmelCase_ ):
snake_case_ = pretrained_model_name_or_path
else:
snake_case_ = hf_bucket_url(UpperCAmelCase_ , filename=UpperCAmelCase_ , use_cdn=UpperCAmelCase_ )
try:
# Load from URL or cache if already cached
snake_case_ = cached_path(
UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , force_download=UpperCAmelCase_ , proxies=UpperCAmelCase_ , resume_download=UpperCAmelCase_ , local_files_only=UpperCAmelCase_ , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
snake_case_ = Config.load_yaml(UpperCAmelCase_ )
except EnvironmentError:
snake_case_ = """Can't load config for"""
raise EnvironmentError(UpperCAmelCase_ )
if resolved_config_file == config_file:
print("""loading configuration file from path""" )
else:
print("""loading configuration file cache""" )
return Config.load_yaml(UpperCAmelCase_ ), kwargs
def _a ( in_tensor ) -> int:
    out_tensor = torch.load("""dump.pt""" , map_location=in_tensor.device )
    na = in_tensor.numpy()
    nb = out_tensor.numpy()[0]
    print(na.shape , na[0, 0, :5] )
    print(nb.shape , nb[0, 0, :5] )
    assert np.allclose(na , nb , rtol=0.01 , atol=0.1 ), (
        f"""{sum([1 for x in np.isclose(na , nb , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %"""
        " element-wise mismatch"
    )
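    # Raising here is intentional: execution halts with a visible signal once the tensors are confirmed to match.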
raise Exception("""tensors are all good""" )
# Hugging face functions below
def _a ( _SCREAMING_SNAKE_CASE ) -> Any:
snake_case_ = urlparse(_SCREAMING_SNAKE_CASE )
return parsed.scheme in ("http", "https")
def _a ( model_id , filename , use_cdn=True ) -> str:
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    # Flat legacy repos ("model_id" without a namespace) store files as "model_id-filename".
    legacy_format = """/""" not in model_id
if legacy_format:
return f"""{endpoint}/{model_id}-{filename}"""
else:
return f"""{endpoint}/{model_id}/{filename}"""
def _a ( url , temp_file , proxies=None , resume_size=0 , user_agent=None , ) -> Any:
    ua = """python/{}""".format(sys.version.split()[0] )
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__ )
    if isinstance(user_agent , dict ):
        ua += "; " + "; ".join("""{}/{}""".format(k , v ) for k, v in user_agent.items() )
    elif isinstance(user_agent , str ):
        ua += "; " + user_agent
    headers = {"""user-agent""": ua}
    if resume_size > 0:
        # Ask the server for the remaining bytes only, so interrupted downloads can resume.
        headers["""Range"""] = """bytes=%d-""" % (resume_size,)
    response = requests.get(url , stream=True , proxies=proxies , headers=headers )
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("""Content-Length""" )
    total = resume_size + int(content_length ) if content_length is not None else None
    progress = tqdm(
        unit="""B""" , unit_scale=True , total=total , initial=resume_size , desc="""Downloading""" , )
    for chunk in response.iter_content(chunk_size=1_024 ):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk ) )
            temp_file.write(chunk )
    progress.close()
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False , ) -> Any:
if cache_dir is None:
snake_case_ = TRANSFORMERS_CACHE
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
snake_case_ = str(_SCREAMING_SNAKE_CASE )
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
snake_case_ = None
if not local_files_only:
try:
snake_case_ = requests.head(_SCREAMING_SNAKE_CASE , allow_redirects=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , timeout=_SCREAMING_SNAKE_CASE )
if response.status_code == 200:
snake_case_ = response.headers.get("""ETag""" )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
snake_case_ = url_to_filename(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# get cache path to put the file
snake_case_ = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(_SCREAMING_SNAKE_CASE ):
return cache_path
else:
snake_case_ = [
file
for file in fnmatch.filter(os.listdir(_SCREAMING_SNAKE_CASE ) , filename + """.*""" )
if not file.endswith(""".json""" ) and not file.endswith(""".lock""" )
]
if len(_SCREAMING_SNAKE_CASE ) > 0:
return os.path.join(_SCREAMING_SNAKE_CASE , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"""Cannot find the requested files in the cached path and outgoing traffic has been"""
""" disabled. To enable model look-ups and downloads online, set 'local_files_only'"""
""" to False.""" )
return None
# From now on, etag is not None.
if os.path.exists(_SCREAMING_SNAKE_CASE ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
snake_case_ = cache_path + """.lock"""
with FileLock(_SCREAMING_SNAKE_CASE ):
# If the download just completed while the lock was activated.
if os.path.exists(_SCREAMING_SNAKE_CASE ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
snake_case_ = cache_path + """.incomplete"""
@contextmanager
def _resumable_file_manager():
with open(_SCREAMING_SNAKE_CASE , """a+b""" ) as f:
yield f
snake_case_ = _resumable_file_manager
if os.path.exists(_SCREAMING_SNAKE_CASE ):
snake_case_ = os.stat(_SCREAMING_SNAKE_CASE ).st_size
else:
snake_case_ = 0
else:
snake_case_ = partial(tempfile.NamedTemporaryFile , dir=_SCREAMING_SNAKE_CASE , delete=_SCREAMING_SNAKE_CASE )
snake_case_ = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
            print(
                """%s not found in cache or force_download set to True, downloading to %s""" % (_SCREAMING_SNAKE_CASE, temp_file.name) )
http_get(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , resume_size=_SCREAMING_SNAKE_CASE , user_agent=_SCREAMING_SNAKE_CASE , )
os.replace(temp_file.name , _SCREAMING_SNAKE_CASE )
snake_case_ = {"""url""": url, """etag""": etag}
snake_case_ = cache_path + """.json"""
with open(_SCREAMING_SNAKE_CASE , """w""" ) as meta_file:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return cache_path
def _a ( url , etag=None ) -> Union[str, Any]:
    # The cache filename is the sha hash of the URL; the ETag hash is appended so new revisions map to new files.
    url_bytes = url.encode("""utf-8""" )
    url_hash = shaaaa(url_bytes )
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("""utf-8""" )
        etag_hash = shaaaa(etag_bytes )
        filename += "." + etag_hash.hexdigest()
if url.endswith(""".h5""" ):
filename += ".h5"
return filename
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , ) -> Any:
if cache_dir is None:
snake_case_ = TRANSFORMERS_CACHE
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
snake_case_ = str(_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
snake_case_ = str(_SCREAMING_SNAKE_CASE )
if is_remote_url(_SCREAMING_SNAKE_CASE ):
# URL, so get it from the cache (downloading if necessary)
snake_case_ = get_from_cache(
_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , resume_download=_SCREAMING_SNAKE_CASE , user_agent=_SCREAMING_SNAKE_CASE , local_files_only=_SCREAMING_SNAKE_CASE , )
elif os.path.exists(_SCREAMING_SNAKE_CASE ):
# File, and it exists.
snake_case_ = url_or_filename
elif urlparse(_SCREAMING_SNAKE_CASE ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("""file {} not found""".format(_SCREAMING_SNAKE_CASE ) )
else:
# Something unknown
raise ValueError("""unable to parse {} as a URL or as a local path""".format(_SCREAMING_SNAKE_CASE ) )
if extract_compressed_file:
if not is_zipfile(_SCREAMING_SNAKE_CASE ) and not tarfile.is_tarfile(_SCREAMING_SNAKE_CASE ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
snake_case_ , snake_case_ = os.path.split(_SCREAMING_SNAKE_CASE )
snake_case_ = output_file.replace(""".""" , """-""" ) + """-extracted"""
snake_case_ = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if os.path.isdir(_SCREAMING_SNAKE_CASE ) and os.listdir(_SCREAMING_SNAKE_CASE ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
snake_case_ = output_path + """.lock"""
with FileLock(_SCREAMING_SNAKE_CASE ):
shutil.rmtree(_SCREAMING_SNAKE_CASE , ignore_errors=_SCREAMING_SNAKE_CASE )
os.makedirs(_SCREAMING_SNAKE_CASE )
if is_zipfile(_SCREAMING_SNAKE_CASE ):
with ZipFile(_SCREAMING_SNAKE_CASE , """r""" ) as zip_file:
zip_file.extractall(_SCREAMING_SNAKE_CASE )
zip_file.close()
elif tarfile.is_tarfile(_SCREAMING_SNAKE_CASE ):
snake_case_ = tarfile.open(_SCREAMING_SNAKE_CASE )
tar_file.extractall(_SCREAMING_SNAKE_CASE )
tar_file.close()
else:
raise EnvironmentError("""Archive format of {} could not be identified""".format(_SCREAMING_SNAKE_CASE ) )
return output_path_extracted
return output_path
def _a ( query , delim="," ) -> Dict:
    assert isinstance(query , str )
    if os.path.isfile(query ):
        with open(query ) as f:
            data = eval(f.read() )
    else:
        req = requests.get(query )
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
            assert data is not None, "could not connect"
            try:
                data = eval(data )
            except Exception:
                data = data.split("""\n""" )
        req.close()
    return data
def _a ( _SCREAMING_SNAKE_CASE ) -> str:
    response = requests.get(_SCREAMING_SNAKE_CASE )
    img = np.array(Image.open(BytesIO(response.content ) ) )
return img
def _a ( url ) -> int:
    fn = url.split("""/""" )[-1]
    if fn not in os.listdir(os.getcwd() ):
        wget.download(url )
    with open(fn , """rb""" ) as stream:
        weights = pkl.load(stream )
    model = weights.pop("""model""" )
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v )
        if "running_var" in k:
            # The checkpoint lacks BatchNorm's "num_batches_tracked" buffers; add zero-valued
            # entries under the matching keys so state_dict loading succeeds.
            zero = torch.tensor([0] )
            k_tracked = k.replace("""running_var""" , """num_batches_tracked""" )
            new[k_tracked] = zero
    return new
def _a ( ) -> str:
print(f"""{os.path.abspath(os.path.join(_SCREAMING_SNAKE_CASE , os.pardir ) )}/demo.ipynb""" )
def _a ( im , input_format="RGB" ) -> List[Any]:
    assert isinstance(im , str )
    if os.path.isfile(im ):
        img = cva.imread(im )
    else:
        img = get_image_from_url(im )
        assert img is not None, f"""could not connect to: {im}"""
    img = cva.cvtColor(img , cva.COLOR_BGR2RGB )
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=1 ) -> int:
return (images[i : i + batch] for i in range(0 , len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ))
| 347 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
__SCREAMING_SNAKE_CASE : Tuple = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class __A (snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Tuple = SpeechTaTokenizer
__lowercase: int = False
__lowercase: List[str] = True
def lowerCAmelCase ( self : Any ) ->str:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ = SpeechTaTokenizer(UpperCAmelCase_ )
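        # Extend the sentencepiece fixture with a <mask> special token and a CTC blank token, then save it for the other tests.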
snake_case_ = AddedToken("""<mask>""" , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ )
snake_case_ = mask_token
tokenizer.add_special_tokens({"""mask_token""": mask_token} )
tokenizer.add_tokens(["""<ctc_blank>"""] )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) ->Dict:
"""simple docstring"""
snake_case_ = """this is a test"""
snake_case_ = """this is a test"""
return input_text, output_text
def lowerCAmelCase ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Tuple=20 , UpperCAmelCase_ : Dict=5 ) ->List[Any]:
"""simple docstring"""
snake_case_ , snake_case_ = self.get_input_output_texts(UpperCAmelCase_ )
snake_case_ = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
snake_case_ = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ )
return text, ids
def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = """<pad>"""
snake_case_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) , UpperCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) , UpperCAmelCase_ )
def lowerCAmelCase ( self : int ) ->str:
"""simple docstring"""
snake_case_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-4] , """œ""" )
self.assertEqual(vocab_keys[-2] , """<mask>""" )
self.assertEqual(vocab_keys[-1] , """<ctc_blank>""" )
self.assertEqual(len(UpperCAmelCase_ ) , 81 )
def lowerCAmelCase ( self : Optional[int] ) ->int:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
"""simple docstring"""
snake_case_ = self.get_tokenizers(do_lower_case=UpperCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
snake_case_ = tokenizer.vocab_size
snake_case_ = len(UpperCAmelCase_ )
self.assertNotEqual(UpperCAmelCase_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
snake_case_ = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
snake_case_ = tokenizer.add_tokens(UpperCAmelCase_ )
snake_case_ = tokenizer.vocab_size
snake_case_ = len(UpperCAmelCase_ )
self.assertNotEqual(UpperCAmelCase_ , 0 )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , len(UpperCAmelCase_ ) )
self.assertEqual(UpperCAmelCase_ , all_size + len(UpperCAmelCase_ ) )
snake_case_ = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=UpperCAmelCase_ )
self.assertGreaterEqual(len(UpperCAmelCase_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
snake_case_ = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
snake_case_ = tokenizer.add_special_tokens(UpperCAmelCase_ )
snake_case_ = tokenizer.vocab_size
snake_case_ = len(UpperCAmelCase_ )
self.assertNotEqual(UpperCAmelCase_ , 0 )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , len(UpperCAmelCase_ ) )
self.assertEqual(UpperCAmelCase_ , all_size_a + len(UpperCAmelCase_ ) )
snake_case_ = tokenizer.encode(
""">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=UpperCAmelCase_ )
self.assertGreaterEqual(len(UpperCAmelCase_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
pass
def lowerCAmelCase ( self : List[str] ) ->Optional[Any]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : List[str] ) ->List[str]:
"""simple docstring"""
snake_case_ = self.get_tokenizer()
snake_case_ = tokenizer.tokenize("""This is a test""" )
# fmt: off
self.assertListEqual(UpperCAmelCase_ , [SPIECE_UNDERLINE, """T""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """a""", SPIECE_UNDERLINE, """t""", """e""", """s""", """t"""] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
snake_case_ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """92000""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
snake_case_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ )
# fmt: off
self.assertListEqual(UpperCAmelCase_ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
snake_case_ = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ )
self.assertListEqual(
UpperCAmelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """<unk>""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
@slow
def lowerCAmelCase ( self : str ) ->Dict:
"""simple docstring"""
snake_case_ = [
"""Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """
"""general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural """
"""Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """
"""models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""",
"""BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """
"""conditioning on both left and right context in all layers.""",
"""The quick brown fox jumps over the lazy dog.""",
]
# fmt: off
snake_case_ = {
"""input_ids""": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ , model_name="""microsoft/speecht5_asr""" , revision="""c5ef64c71905caeccde0e4462ef3f9077224c524""" , sequences=UpperCAmelCase_ , )
| 347 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __A (unittest.TestCase):
'''simple docstring'''
def __init__( self : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any=3 , UpperCAmelCase_ : List[Any]=32 , UpperCAmelCase_ : str=3 , UpperCAmelCase_ : str=10 , UpperCAmelCase_ : List[str]=[10, 20, 30, 40] , UpperCAmelCase_ : Dict=[1, 1, 2, 1] , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Optional[int]="relu" , UpperCAmelCase_ : Optional[int]=3 , UpperCAmelCase_ : Any=None , ) ->List[Any]:
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = image_size
snake_case_ = num_channels
snake_case_ = embeddings_size
snake_case_ = hidden_sizes
snake_case_ = depths
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = hidden_act
snake_case_ = num_labels
snake_case_ = scope
snake_case_ = len(UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = self.get_config()
return config, pixel_values
def lowerCAmelCase ( self : Any ) ->Dict:
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = FlaxRegNetModel(config=UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCAmelCase ( self : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str ) ->List[Any]:
"""simple docstring"""
snake_case_ = self.num_labels
snake_case_ = FlaxRegNetForImageClassification(config=UpperCAmelCase_ )
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[int]:
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class __A (snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Dict = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
__lowercase: int = False
__lowercase: Union[str, Any] = False
__lowercase: Any = False
def lowerCAmelCase ( self : List[str] ) ->None:
"""simple docstring"""
snake_case_ = FlaxRegNetModelTester(self )
snake_case_ = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ )
def lowerCAmelCase ( self : int ) ->Union[str, Any]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase ( self : List[Any] ) ->str:
"""simple docstring"""
return
def lowerCAmelCase ( self : List[Any] ) ->str:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] ) ->Dict:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_ )
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def lowerCAmelCase ( self : Optional[Any] ) ->Dict:
"""simple docstring"""
pass
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[int]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Union[str, Any] ) ->int:
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(UpperCAmelCase_ )
snake_case_ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCAmelCase_ )
def lowerCAmelCase ( self : List[str] ) ->int:
"""simple docstring"""
def check_hidden_states_output(UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] ):
snake_case_ = model_class(UpperCAmelCase_ )
snake_case_ = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
snake_case_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case_ = self.model_tester.num_stages
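            # One hidden state per stage plus the initial embedding output, hence the + 1 below.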
self.assertEqual(len(UpperCAmelCase_ ) , expected_num_stages + 1 )
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ = True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = model_class(UpperCAmelCase_ )
@jax.jit
def model_jitted(UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[str] ):
return model(pixel_values=UpperCAmelCase_ , **UpperCAmelCase_ )
with self.subTest("""JIT Enabled""" ):
snake_case_ = model_jitted(**UpperCAmelCase_ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
snake_case_ = model_jitted(**UpperCAmelCase_ ).to_tuple()
self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) )
for jitted_output, output in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def _a ( ) -> Any:
snake_case_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_flax
class __A (unittest.TestCase):
'''simple docstring'''
@cached_property
def lowerCAmelCase ( self : Optional[int] ) ->Any:
"""simple docstring"""
return AutoImageProcessor.from_pretrained("""facebook/regnet-y-040""" ) if is_vision_available() else None
@slow
def lowerCAmelCase ( self : List[Any] ) ->Optional[int]:
"""simple docstring"""
snake_case_ = FlaxRegNetForImageClassification.from_pretrained("""facebook/regnet-y-040""" )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(images=UpperCAmelCase_ , return_tensors="""np""" )
snake_case_ = model(**UpperCAmelCase_ )
# verify the logits
snake_case_ = (1, 1_000)
self.assertEqual(outputs.logits.shape , UpperCAmelCase_ )
snake_case_ = jnp.array([-0.4_180, -1.5_051, -3.4_836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1E-4 ) )
| 347 |
"""simple docstring"""
import datasets
__SCREAMING_SNAKE_CASE : Tuple = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n'
__SCREAMING_SNAKE_CASE : Dict = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'
__SCREAMING_SNAKE_CASE : List[str] = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n'
def _a ( preds , labels ) -> List[Any]:
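    # XNLI is scored with plain accuracy: the fraction of predictions that match the gold labels.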
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __A (datasets.Metric):
'''simple docstring'''
def lowerCAmelCase ( self : str ) ->Any:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any ) ->int:
"""simple docstring"""
return {"accuracy": simple_accuracy(UpperCAmelCase_ , UpperCAmelCase_ )}
| 347 | 1 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class __A (snake_case__):
'''simple docstring'''
def __init__( self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] ) ->List[str]:
"""simple docstring"""
snake_case_ = dataset
snake_case_ = process
snake_case_ = params
def __len__( self : str ) ->Union[str, Any]:
"""simple docstring"""
return len(self.dataset )
def __getitem__( self : Dict , UpperCAmelCase_ : Any ) ->str:
"""simple docstring"""
snake_case_ = self.dataset[i]
snake_case_ = self.process(UpperCAmelCase_ , **self.params )
return processed
class __A (snake_case__):
'''simple docstring'''
def __init__( self : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple=None ) ->Dict:
"""simple docstring"""
snake_case_ = loader
snake_case_ = infer
snake_case_ = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
snake_case_ = None
snake_case_ = loader_batch_size
# Internal bookkeeping
snake_case_ = None
snake_case_ = None
def __len__( self : int ) ->Tuple:
"""simple docstring"""
return len(self.loader )
def __iter__( self : Union[str, Any] ) ->Tuple:
"""simple docstring"""
snake_case_ = iter(self.loader )
return self
def lowerCAmelCase ( self : Optional[int] ) ->str:
"""simple docstring"""
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
snake_case_ = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
snake_case_ = {}
for k, element in self._loader_batch_data.items():
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
# Convert ModelOutput to tuple first
snake_case_ = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
snake_case_ = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
snake_case_ = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
snake_case_ = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
snake_case_ = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
snake_case_ = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
snake_case_ = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
snake_case_ = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
snake_case_ = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
snake_case_ = self._loader_batch_data.__class__(UpperCAmelCase_ )
self._loader_batch_index += 1
return result
def lowerCAmelCase ( self : Any ) ->Union[str, Any]:
"""simple docstring"""
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
snake_case_ = next(self.iterator )
snake_case_ = self.infer(UpperCAmelCase_ , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(UpperCAmelCase_ , torch.Tensor ):
snake_case_ = processed
else:
snake_case_ = list(processed.keys() )[0]
snake_case_ = processed[key]
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = len(UpperCAmelCase_ )
else:
snake_case_ = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
snake_case_ = observed_batch_size
# Setting internal index to unwrap the batch
snake_case_ = processed
snake_case_ = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class __A (snake_case__):
'''simple docstring'''
def __init__( self : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int=None ) ->int:
"""simple docstring"""
super().__init__(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def __iter__( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
snake_case_ = iter(self.loader )
snake_case_ = None
return self
def lowerCAmelCase ( self : Tuple ) ->Optional[Any]:
"""simple docstring"""
if self.subiterator is None:
snake_case_ = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
snake_case_ = next(self.subiterator )
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item.
            # ChunkIterator keeps feeding until ALL elements of the iterator
            # have created their subiterator and been fully iterated through.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
snake_case_ = self.infer(next(self.iterator ) , **self.params )
snake_case_ = next(self.subiterator )
return processed
class __A (snake_case__):
'''simple docstring'''
def __iter__( self : Any ) ->Tuple:
"""simple docstring"""
snake_case_ = iter(self.loader )
return self
def lowerCAmelCase ( self : int ) ->Any:
"""simple docstring"""
        is_last = False
        accumulator = []
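        # Pack processed items into one chunk; the chunk is complete when an item arrives flagged is_last=True.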
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("""is_last""" )
                accumulator.append(item )
if is_last:
return accumulator
while not is_last:
snake_case_ = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(UpperCAmelCase_ , torch.Tensor ):
snake_case_ = processed
else:
snake_case_ = list(processed.keys() )[0]
snake_case_ = processed[key]
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = len(UpperCAmelCase_ )
else:
snake_case_ = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
snake_case_ = observed_batch_size
snake_case_ = processed
snake_case_ = 0
while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("""is_last""" )
                    accumulator.append(item )
if is_last:
return accumulator
else:
                item = processed
                is_last = item.pop("""is_last""" )
                accumulator.append(item )
return accumulator
class __A (snake_case__):
'''simple docstring'''
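    # Exposes a single column of the wrapped dataset (self.key), e.g. for streaming one text field into a pipeline.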
def __init__( self : Optional[Any] , UpperCAmelCase_ : Dataset , UpperCAmelCase_ : str ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = dataset
snake_case_ = key
def __len__( self : List[str] ) ->Optional[Any]:
"""simple docstring"""
return len(self.dataset )
def __getitem__( self : Any , UpperCAmelCase_ : List[str] ) ->Tuple:
"""simple docstring"""
return self.dataset[i][self.key]
class __A (snake_case__):
'''simple docstring'''
def __init__( self : List[str] , UpperCAmelCase_ : Dataset , UpperCAmelCase_ : str , UpperCAmelCase_ : str ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = dataset
snake_case_ = keya
snake_case_ = keya
def __len__( self : Dict ) ->str:
"""simple docstring"""
return len(self.dataset )
def __getitem__( self : Dict , UpperCAmelCase_ : Optional[int] ) ->Union[str, Any]:
"""simple docstring"""
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 347 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
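# Each placeholder class below raises an informative error via requires_backends when the sentencepiece backend is missing.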
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: List[Any] = ["""sentencepiece"""]
def __init__( self : int , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[str] ) ->List[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Optional[int] = ["""sentencepiece"""]
def __init__( self : Optional[int] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Tuple ) ->Dict:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Any = ["""sentencepiece"""]
def __init__( self : Any , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ) ->List[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Dict = ["""sentencepiece"""]
def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int ) ->Optional[int]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: List[str] = ["""sentencepiece"""]
def __init__( self : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ) ->List[str]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict ) ->Optional[int]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : int , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Union[str, Any] ) ->Dict:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Optional[Any] = ["""sentencepiece"""]
def __init__( self : List[str] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Optional[Any] ) ->Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Optional[int] = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[Any] ) ->Tuple:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Any = ["""sentencepiece"""]
def __init__( self : List[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : int ) ->List[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Any = ["""sentencepiece"""]
def __init__( self : int , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[Any] ) ->Tuple:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : Optional[int] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Tuple = ["""sentencepiece"""]
def __init__( self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ) ->Dict:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Union[str, Any] = ["""sentencepiece"""]
def __init__( self : List[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[Any] ) ->Optional[int]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : int , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[str] ) ->Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Tuple = ["""sentencepiece"""]
def __init__( self : int , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Optional[Any] ) ->str:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: List[Any] = ["""sentencepiece"""]
def __init__( self : Dict , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[Any] ) ->int:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Union[str, Any] = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int] ) ->Optional[int]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Any = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict ) ->Dict:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Any = ["""sentencepiece"""]
def __init__( self : List[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[str] ) ->List[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[Any] ) ->List[str]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[Any] ) ->Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: List[Any] = ["""sentencepiece"""]
def __init__( self : Any , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[Any] ) ->List[str]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Dict = ["""sentencepiece"""]
def __init__( self : int , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Union[str, Any] ) ->List[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: List[Any] = ["""sentencepiece"""]
def __init__( self : List[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Union[str, Any] = ["""sentencepiece"""]
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : str ) ->Optional[int]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Optional[int] = ["""sentencepiece"""]
def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[int] ) ->Dict:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Dict = ["""sentencepiece"""]
def __init__( self : Optional[int] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[str] ) ->Optional[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int] ) ->Any:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: List[str] = ["""sentencepiece"""]
def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Any = ["""sentencepiece"""]
def __init__( self : List[str] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int] ) ->str:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
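# --- Illustrative sketch, not part of the original file ---------------------------------
# The classes above all follow the "dummy object" pattern: import-time placeholders that
# raise a clear error when an optional backend (here, sentencepiece) is missing. A
# minimal, self-contained version of the idea follows; the helper and class names are
# made up for the sketch and are not the library's real implementations.
import importlib.util

def requires_backends_sketch(obj, backends):
    name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires the following backend(s): {', '.join(missing)}")

class DummySentencePieceTokenizer:
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        # Fail fast with an actionable message instead of an obscure error later on.
        requires_backends_sketch(self, self._backends)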
| 347 | 1 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class __A :
'''simple docstring'''
def __init__( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : int="resnet50" , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : Union[str, Any]=32 , UpperCAmelCase_ : List[Any]=3 , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Optional[Any]=True , ) ->List[str]:
"""simple docstring"""
snake_case_ = parent
snake_case_ = out_indices if out_indices is not None else [4]
snake_case_ = stage_names
snake_case_ = out_features
snake_case_ = backbone
snake_case_ = batch_size
snake_case_ = image_size
snake_case_ = num_channels
snake_case_ = use_pretrained_backbone
snake_case_ = is_training
def lowerCAmelCase ( self : List[Any] ) ->int:
"""simple docstring"""
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = self.get_config()
return config, pixel_values
def lowerCAmelCase ( self : Tuple ) ->Tuple:
"""simple docstring"""
        return TimmBackboneConfig(
            image_size=self.image_size ,
            num_channels=self.num_channels ,
            out_features=self.out_features ,
            out_indices=self.out_indices ,
            stage_names=self.stage_names ,
            use_pretrained_backbone=self.use_pretrained_backbone ,
            backbone=self.backbone ,
        )
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] ) ->Tuple:
"""simple docstring"""
snake_case_ = TimmBackbone(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
with torch.no_grad():
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(
            result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def lowerCAmelCase ( self : List[Any] ) ->Optional[int]:
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class __A (snake_case__ , snake_case__ , snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Dict = (TimmBackbone,) if is_torch_available() else ()
__lowercase: Tuple = {"""feature-extraction""": TimmBackbone} if is_torch_available() else {}
__lowercase: int = False
__lowercase: Dict = False
__lowercase: Optional[int] = False
__lowercase: Optional[int] = False
def lowerCAmelCase ( self : Optional[int] ) ->str:
"""simple docstring"""
snake_case_ = TimmBackboneModelTester(self )
snake_case_ = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ )
def lowerCAmelCase ( self : Dict ) ->List[str]:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase ( self : Optional[int] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = """resnet18"""
snake_case_ = """microsoft/resnet-18"""
snake_case_ = AutoBackbone.from_pretrained(UpperCAmelCase_ , use_timm_backbone=UpperCAmelCase_ )
snake_case_ = AutoBackbone.from_pretrained(UpperCAmelCase_ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
snake_case_ = AutoBackbone.from_pretrained(UpperCAmelCase_ , use_timm_backbone=UpperCAmelCase_ , out_indices=[1, 2, 3] )
snake_case_ = AutoBackbone.from_pretrained(UpperCAmelCase_ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("""TimmBackbone doesn't support feed forward chunking""" )
def lowerCAmelCase ( self : Tuple ) ->str:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""" )
def lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone initialization is managed on the timm side""" )
def lowerCAmelCase ( self : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def lowerCAmelCase ( self : Dict ) ->Tuple:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def lowerCAmelCase ( self : Optional[Any] ) ->Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" )
def lowerCAmelCase ( self : Dict ) ->List[str]:
"""simple docstring"""
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowerCAmelCase ( self : List[Any] ) ->Dict:
"""simple docstring"""
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def lowerCAmelCase ( self : str ) ->Optional[Any]:
"""simple docstring"""
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def lowerCAmelCase ( self : Optional[int] ) ->str:
"""simple docstring"""
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowerCAmelCase ( self : Optional[Any] ) ->Optional[int]:
"""simple docstring"""
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowerCAmelCase ( self : str ) ->Any:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""" )
def lowerCAmelCase ( self : List[Any] ) ->Any:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone doesn't support output_attentions.""" )
def lowerCAmelCase ( self : int ) ->Tuple:
"""simple docstring"""
pass
@unittest.skip("""Safetensors is not supported by timm.""" )
def lowerCAmelCase ( self : Any ) ->Optional[Any]:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase ( self : List[str] ) ->Union[str, Any]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : int ) ->Optional[int]:
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(UpperCAmelCase_ )
snake_case_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCAmelCase_ )
def lowerCAmelCase ( self : Tuple ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = True
snake_case_ = self.has_attentions
# no need to test all models as different heads yield the same functionality
snake_case_ = self.all_model_classes[0]
snake_case_ = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = model(**UpperCAmelCase_ )
snake_case_ = outputs[0][-1]
# Encoder-/Decoder-only models
snake_case_ = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
snake_case_ = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=UpperCAmelCase_ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def lowerCAmelCase ( self : List[str] ) ->str:
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(**UpperCAmelCase_ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
snake_case_ = copy.deepcopy(UpperCAmelCase_ )
snake_case_ = None
snake_case_ = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(**UpperCAmelCase_ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
snake_case_ = copy.deepcopy(UpperCAmelCase_ )
snake_case_ = False
snake_case_ = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(**UpperCAmelCase_ )
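# --- Illustrative usage sketch, not part of the test file -------------------------------
# What the tests above exercise, as a standalone snippet: load a timm checkpoint through
# AutoBackbone and read multi-scale feature maps. The checkpoint name and attributes
# mirror the test code above; this assumes torch, timm and network access are available.
import torch
from transformers import AutoBackbone

backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=[1, 2, 3])
pixel_values = torch.rand(1, 3, 224, 224)
with torch.no_grad():
    outputs = backbone(pixel_values)
for n_channels, feature_map in zip(backbone.channels, outputs.feature_maps):
    print(n_channels, feature_map.shape)  # one (batch, channels, H, W) tensor per requested stage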
| 347 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
__SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
class __A (snake_case__):
'''simple docstring'''
def __init__( self : str , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int ) ->None:
"""simple docstring"""
warnings.warn(
"""The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use MobileViTImageProcessor instead.""" , UpperCAmelCase_ , )
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
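# --- Illustrative sketch, not part of the original file ---------------------------------
# The shim above keeps a deprecated class name alive as a thin subclass that warns and
# defers to its replacement. The generic shape of that pattern, with made-up names:
import warnings

class NewImageProcessor:
    def __init__(self, *args, **kwargs):
        self.kwargs = kwargs

class OldFeatureExtractor(NewImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)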
| 347 | 1 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
__SCREAMING_SNAKE_CASE : Any = None
__SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Optional[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
__SCREAMING_SNAKE_CASE : List[str] = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
class __A (snake_case__):
'''simple docstring'''
__lowercase: Dict = VOCAB_FILES_NAMES
__lowercase: Any = PRETRAINED_VOCAB_FILES_MAP
__lowercase: int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase: Optional[Any] = ["""input_ids""", """attention_mask"""]
__lowercase: str = TaTokenizer
__lowercase: List[int] = []
def __init__( self : Any , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : int="</s>" , UpperCAmelCase_ : Union[str, Any]="<unk>" , UpperCAmelCase_ : Optional[Any]="<pad>" , UpperCAmelCase_ : Optional[int]=100 , UpperCAmelCase_ : Optional[Any]=None , **UpperCAmelCase_ : List[str] , ) ->Optional[Any]:
"""simple docstring"""
if extra_ids > 0 and additional_special_tokens is None:
snake_case_ = [F"""<extra_id_{i}>""" for i in range(UpperCAmelCase_ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
snake_case_ = len(set(filter(lambda UpperCAmelCase_ : bool("""extra_id_""" in str(UpperCAmelCase_ ) ) , UpperCAmelCase_ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
""" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
""" tokens""" )
super().__init__(
UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , extra_ids=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , **UpperCAmelCase_ , )
snake_case_ = vocab_file
snake_case_ = False if not self.vocab_file else True
snake_case_ = extra_ids
@staticmethod
def lowerCAmelCase ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] ) ->int:
"""simple docstring"""
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
snake_case_ = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"""This tokenizer was incorrectly instantiated with a model max length of"""
F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
""" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"""
""" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"""
F""" {pretrained_model_name_or_path} automatically truncating your input to"""
F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
""" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"""
""" instantiate this tokenizer with `model_max_length` set to your preferred value.""" , UpperCAmelCase_ , )
return max_model_length
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ) ->Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case_ = os.path.join(
UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ):
copyfile(self.vocab_file , UpperCAmelCase_ )
logger.info(F"""Copy vocab file to {out_vocab_file}""" )
return (out_vocab_file,)
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]:
"""simple docstring"""
snake_case_ = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
snake_case_ = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]:
"""simple docstring"""
snake_case_ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def lowerCAmelCase ( self : str ) ->Dict:
"""simple docstring"""
return list(
set(filter(lambda UpperCAmelCase_ : bool(re.search(R"""<extra_id_\d+>""" , UpperCAmelCase_ ) ) is not None , self.additional_special_tokens ) ) )
def lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
return [self.convert_tokens_to_ids(UpperCAmelCase_ ) for token in self.get_sentinel_tokens()]
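# --- Illustrative usage sketch, not part of the original file ---------------------------
# How the extra_ids / sentinel-token machinery above is typically used: T5-style span
# corruption marks masked spans with <extra_id_N> placeholders, each of which encodes to
# a single id. Assumes a recent transformers install plus network access to the public
# t5-small checkpoint; get_sentinel_tokens() is the accessor defined above.
from transformers import T5TokenizerFast

tok = T5TokenizerFast.from_pretrained("t5-small")  # extra_ids defaults to 100
print(tok("The <extra_id_0> walks in <extra_id_1> park").input_ids)
print(tok.get_sentinel_tokens()[:3])  # e.g. ['<extra_id_0>', '<extra_id_1>', ...]; order may vary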
| 347 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""")
        in_proj_bias = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["""head.weight""", """head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1_000
        repo_id = """huggingface/label-files"""
        filename = """imagenet-1k-id2label.json"""
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("""tiny"""):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("""small"""):
            config.hidden_size = 384
            config.intermediate_size = 1_536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("""small"""):
            config.hidden_size = 768
            config.intermediate_size = 2_304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("""base"""):
            pass
        elif vit_name[4:].startswith("""large"""):
            config.hidden_size = 1_024
            config.intermediate_size = 4_096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("""huge"""):
            config.hidden_size = 1_280
            config.intermediate_size = 5_120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16
# load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
snake_case_ = DeiTImageProcessor(size=config.image_size )
else:
snake_case_ = ViTImageProcessor(size=config.image_size )
snake_case_ = image_processor(images=prepare_img() , return_tensors="""pt""" )
snake_case_ = encoding["""pixel_values"""]
snake_case_ = model(_SCREAMING_SNAKE_CASE )
if base_model:
snake_case_ = timm_model.forward_features(_SCREAMING_SNAKE_CASE )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_SCREAMING_SNAKE_CASE , outputs.pooler_output , atol=1E-3 )
else:
snake_case_ = timm_model(_SCREAMING_SNAKE_CASE )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_SCREAMING_SNAKE_CASE , outputs.logits , atol=1E-3 )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
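# --- Illustrative sketch, not part of the conversion script -----------------------------
# The core move in read_in_q_k_v above, on toy tensors: timm fuses the attention
# projections into one (3 * hidden, hidden) qkv matrix, while the HF ViT wants separate
# query/key/value slices. The size below is made up for the demonstration.
def _demo_qkv_split(hidden_size=8):
    qkv_weight = torch.randn(3 * hidden_size, hidden_size)
    query = qkv_weight[:hidden_size, :]
    key = qkv_weight[hidden_size : hidden_size * 2, :]
    value = qkv_weight[-hidden_size:, :]
    # Concatenating the slices back in q, k, v order reproduces the fused matrix.
    assert torch.equal(torch.cat([query, key, value], dim=0), qkv_weight)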
| 347 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Optional[Any] = ["""keras_nlp"""]
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : str ) ->Tuple:
"""simple docstring"""
requires_backends(self , ["""keras_nlp"""] )
| 347 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class __A (unittest.TestCase):
'''simple docstring'''
def __init__( self : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple=13 , UpperCAmelCase_ : List[Any]=7 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Dict=99 , UpperCAmelCase_ : str=32 , UpperCAmelCase_ : Tuple=5 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : Any=37 , UpperCAmelCase_ : int="gelu" , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Dict=512 , UpperCAmelCase_ : Optional[Any]=16 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : str=4 , ) ->Tuple:
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_attention_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_choices
def lowerCAmelCase ( self : Optional[int] ) ->str:
"""simple docstring"""
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_attention_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        snake_case_ = RoFormerConfig(
            vocab_size=self.vocab_size ,
            hidden_size=self.hidden_size ,
            num_hidden_layers=self.num_hidden_layers ,
            num_attention_heads=self.num_attention_heads ,
            intermediate_size=self.intermediate_size ,
            hidden_act=self.hidden_act ,
            hidden_dropout_prob=self.hidden_dropout_prob ,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob ,
            max_position_embeddings=self.max_position_embeddings ,
            type_vocab_size=self.type_vocab_size ,
            is_decoder=UpperCAmelCase_ ,
            initializer_range=self.initializer_range ,
        )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase ( self : List[str] ) ->Dict:
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class __A (snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Union[str, Any] = True
__lowercase: int = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
snake_case_ = FlaxRoFormerModelTester(self )
@slow
def lowerCAmelCase ( self : Any ) ->List[str]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
snake_case_ = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=UpperCAmelCase_ )
snake_case_ = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase_ )
@require_flax
class __A (unittest.TestCase):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : str ) ->Dict:
"""simple docstring"""
snake_case_ = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
snake_case_ = jnp.array([[0, 1, 2, 3, 4, 5]] )
snake_case_ = model(UpperCAmelCase_ )[0]
snake_case_ = 50_000
snake_case_ = (1, 6, vocab_size)
self.assertEqual(output.shape , UpperCAmelCase_ )
snake_case_ = jnp.array(
[[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
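# --- Illustrative usage sketch, not part of the test file -------------------------------
# The slow integration test above, as a standalone snippet: run the public Flax RoFormer
# masked-LM checkpoint on a dummy batch and inspect the logits shape. Assumes jax/flax
# are installed and the checkpoint can be downloaded.
import jax.numpy as jnp
from transformers import FlaxRoFormerForMaskedLM

model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
logits = model(jnp.array([[0, 1, 2, 3, 4, 5]]))[0]
print(logits.shape)  # (1, 6, 50000): (batch size, sequence length, vocab size)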
| 347 | 1 |
"""simple docstring"""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
__SCREAMING_SNAKE_CASE : str = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding="""utf_8""") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((""" """.join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for i, (story, cont1, cont2, mc_label) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
return tensor_datasets
def _a ( ) -> Union[str, Any]:
snake_case_ = argparse.ArgumentParser()
parser.add_argument("""--model_name""" , type=_SCREAMING_SNAKE_CASE , default="""openai-gpt""" , help="""pretrained model name""" )
parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""" )
parser.add_argument("""--do_eval""" , action="""store_true""" , help="""Whether to run eval on the dev set.""" )
parser.add_argument(
"""--output_dir""" , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="""The output directory where the model predictions and checkpoints will be written.""" , )
parser.add_argument("""--train_dataset""" , type=_SCREAMING_SNAKE_CASE , default="""""" )
parser.add_argument("""--eval_dataset""" , type=_SCREAMING_SNAKE_CASE , default="""""" )
parser.add_argument("""--seed""" , type=_SCREAMING_SNAKE_CASE , default=42 )
parser.add_argument("""--num_train_epochs""" , type=_SCREAMING_SNAKE_CASE , default=3 )
parser.add_argument("""--train_batch_size""" , type=_SCREAMING_SNAKE_CASE , default=8 )
parser.add_argument("""--eval_batch_size""" , type=_SCREAMING_SNAKE_CASE , default=16 )
parser.add_argument("""--adam_epsilon""" , default=1E-8 , type=_SCREAMING_SNAKE_CASE , help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--max_grad_norm""" , type=_SCREAMING_SNAKE_CASE , default=1 )
parser.add_argument(
"""--max_steps""" , default=-1 , type=_SCREAMING_SNAKE_CASE , help=(
"""If > 0: set total number of training steps to perform. Override num_train_epochs."""
) , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=_SCREAMING_SNAKE_CASE , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , )
parser.add_argument("""--learning_rate""" , type=_SCREAMING_SNAKE_CASE , default=6.2_5E-5 )
parser.add_argument("""--warmup_steps""" , default=0 , type=_SCREAMING_SNAKE_CASE , help="""Linear warmup over warmup_steps.""" )
parser.add_argument("""--lr_schedule""" , type=_SCREAMING_SNAKE_CASE , default="""warmup_linear""" )
parser.add_argument("""--weight_decay""" , type=_SCREAMING_SNAKE_CASE , default=0.01 )
parser.add_argument("""--lm_coef""" , type=_SCREAMING_SNAKE_CASE , default=0.9 )
parser.add_argument("""--n_valid""" , type=_SCREAMING_SNAKE_CASE , default=374 )
parser.add_argument("""--server_ip""" , type=_SCREAMING_SNAKE_CASE , default="""""" , help="""Can be used for distant debugging.""" )
parser.add_argument("""--server_port""" , type=_SCREAMING_SNAKE_CASE , default="""""" , help="""Can be used for distant debugging.""" )
snake_case_ = parser.parse_args()
print(_SCREAMING_SNAKE_CASE )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("""Waiting for debugger attach""" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_SCREAMING_SNAKE_CASE )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
snake_case_ = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
snake_case_ = torch.cuda.device_count()
logger.info("""device: {}, n_gpu {}""".format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
if not args.do_train and not args.do_eval:
raise ValueError("""At least one of `do_train` or `do_eval` must be True.""" )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
snake_case_ = ["""_start_""", """_delimiter_""", """_classify_"""]
snake_case_ = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(_SCREAMING_SNAKE_CASE )
snake_case_ = tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
snake_case_ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(_SCREAMING_SNAKE_CASE ) )
model.to(_SCREAMING_SNAKE_CASE )
# Load and encode the datasets
    def tokenize_and_encode(obj):
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]
logger.info("""Encoding dataset...""" )
snake_case_ = load_rocstories_dataset(args.train_dataset )
snake_case_ = load_rocstories_dataset(args.eval_dataset )
snake_case_ = (train_dataset, eval_dataset)
snake_case_ = tokenize_and_encode(_SCREAMING_SNAKE_CASE )
# Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
snake_case_ = pre_process_datasets(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE )
snake_case_ , snake_case_ = tensor_datasets[0], tensor_datasets[1]
snake_case_ = TensorDataset(*_SCREAMING_SNAKE_CASE )
snake_case_ = RandomSampler(_SCREAMING_SNAKE_CASE )
snake_case_ = DataLoader(_SCREAMING_SNAKE_CASE , sampler=_SCREAMING_SNAKE_CASE , batch_size=args.train_batch_size )
snake_case_ = TensorDataset(*_SCREAMING_SNAKE_CASE )
snake_case_ = SequentialSampler(_SCREAMING_SNAKE_CASE )
snake_case_ = DataLoader(_SCREAMING_SNAKE_CASE , sampler=_SCREAMING_SNAKE_CASE , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
snake_case_ = args.max_steps
snake_case_ = args.max_steps // (len(_SCREAMING_SNAKE_CASE ) // args.gradient_accumulation_steps) + 1
else:
snake_case_ = len(_SCREAMING_SNAKE_CASE ) // args.gradient_accumulation_steps * args.num_train_epochs
snake_case_ = list(model.named_parameters() )
snake_case_ = ["""bias""", """LayerNorm.bias""", """LayerNorm.weight"""]
snake_case_ = [
{
"""params""": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
"""weight_decay""": args.weight_decay,
},
{"""params""": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], """weight_decay""": 0.0},
]
snake_case_ = AdamW(_SCREAMING_SNAKE_CASE , lr=args.learning_rate , eps=args.adam_epsilon )
snake_case_ = get_linear_schedule_with_warmup(
_SCREAMING_SNAKE_CASE , num_warmup_steps=args.warmup_steps , num_training_steps=_SCREAMING_SNAKE_CASE )
if args.do_train:
snake_case_ , snake_case_ , snake_case_ = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc="""Epoch""" ):
snake_case_ = 0
snake_case_ = 0
snake_case_ = tqdm(_SCREAMING_SNAKE_CASE , desc="""Training""" )
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
snake_case_ = tuple(t.to(_SCREAMING_SNAKE_CASE ) for t in batch )
snake_case_ , snake_case_ , snake_case_ , snake_case_ = batch
snake_case_ = model(_SCREAMING_SNAKE_CASE , mc_token_ids=_SCREAMING_SNAKE_CASE , lm_labels=_SCREAMING_SNAKE_CASE , mc_labels=_SCREAMING_SNAKE_CASE )
snake_case_ = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
snake_case_ = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
snake_case_ = """Training loss: {:.2e} lr: {:.2e}""".format(_SCREAMING_SNAKE_CASE , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
snake_case_ = model.module if hasattr(_SCREAMING_SNAKE_CASE , """module""" ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
snake_case_ = os.path.join(args.output_dir , _SCREAMING_SNAKE_CASE )
snake_case_ = os.path.join(args.output_dir , _SCREAMING_SNAKE_CASE )
torch.save(model_to_save.state_dict() , _SCREAMING_SNAKE_CASE )
model_to_save.config.to_json_file(_SCREAMING_SNAKE_CASE )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
snake_case_ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
snake_case_ = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(_SCREAMING_SNAKE_CASE )
if args.do_eval:
model.eval()
snake_case_ , snake_case_ = 0, 0
snake_case_ , snake_case_ = 0, 0
for batch in tqdm(_SCREAMING_SNAKE_CASE , desc="""Evaluating""" ):
snake_case_ = tuple(t.to(_SCREAMING_SNAKE_CASE ) for t in batch )
snake_case_ , snake_case_ , snake_case_ , snake_case_ = batch
with torch.no_grad():
snake_case_ , snake_case_ , snake_case_ , snake_case_ = model(
_SCREAMING_SNAKE_CASE , mc_token_ids=_SCREAMING_SNAKE_CASE , lm_labels=_SCREAMING_SNAKE_CASE , mc_labels=_SCREAMING_SNAKE_CASE )
snake_case_ = mc_logits.detach().cpu().numpy()
snake_case_ = mc_labels.to("""cpu""" ).numpy()
snake_case_ = accuracy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
snake_case_ = eval_loss / nb_eval_steps
snake_case_ = eval_accuracy / nb_eval_examples
snake_case_ = tr_loss / nb_tr_steps if args.do_train else None
snake_case_ = {"""eval_loss""": eval_loss, """eval_accuracy""": eval_accuracy, """train_loss""": train_loss}
snake_case_ = os.path.join(args.output_dir , """eval_results.txt""" )
with open(_SCREAMING_SNAKE_CASE , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key in sorted(result.keys() ):
logger.info(""" %s = %s""" , _SCREAMING_SNAKE_CASE , str(result[key] ) )
writer.write("""%s = %s\n""" % (key, str(result[key] )) )
if __name__ == "__main__":
main()
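# --- Illustrative sketch, not part of the original script -------------------------------
# Toy demonstration of the (n_batch, 2, seq_len) layout produced by pre_process_datasets
# above: each example appears once per candidate continuation along axis 1, and
# mc_token_ids records the position of each candidate's classification token. All token
# ids below are made up; call _demo_pre_process() to see the shapes.
def _demo_pre_process():
    story, cont1, cont2, label = [10, 11], [12], [13, 14], 1
    tensors = pre_process_datasets(
        [[(story, cont1, cont2, label)]], input_len=8, cap_length=4,
        start_token=1, delimiter_token=2, clf_token=3,
    )
    input_ids, mc_token_ids, lm_labels, mc_labels = tensors[0]
    print(tuple(input_ids.shape), mc_token_ids.tolist(), mc_labels.tolist())
    # (1, 2, 8) [[5, 6]] [1]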
| 347 |
"""simple docstring"""
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    # 1) Construct the failure array
    failure = get_failure_array(pattern)
    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def get_failure_array(pattern: str) -> list[int]:
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
if __name__ == "__main__":
# Test 1)
    pattern = 'abc1abc12'
    texta = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
    textb = 'alskfjaldsk23adsfabcabc'
    assert kmp(pattern, texta) and not kmp(pattern, textb)
    # Test 2)
    pattern = 'ABABX'
    text = 'ABABZABABYABABX'
    assert kmp(pattern, text)
    # Test 3)
    pattern = 'AAAB'
    text = 'ABAAAAAB'
    assert kmp(pattern, text)
    # Test 4)
    pattern = 'abcdabcy'
    text = 'abcxabcdabxabcdabcdabcy'
    assert kmp(pattern, text)
    # Test 5)
    pattern = 'aabaabaaa'
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
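    # --- Illustrative trace, not part of the original tests -----------------------------
    # How the failure array drives the search: on a mismatch at pattern index j, the
    # algorithm falls back to failure[j - 1] instead of restarting from scratch, so no
    # text character is ever re-scanned.
    print(get_failure_array('ABABX'))  # [0, 0, 1, 2, 0]
    # In Test 2 above, after matching 'ABAB' and then failing on 'Z', j falls back to
    # failure[3] = 2: the matched prefix 'AB' is kept and comparison resumes at index 2.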
| 347 | 1 |
"""simple docstring"""
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[int]:
return EnvironmentCommand()
class __A (snake_case__):
'''simple docstring'''
@staticmethod
def lowerCAmelCase ( UpperCAmelCase_ : ArgumentParser ) ->Any:
"""simple docstring"""
snake_case_ = parser.add_parser("""env""" )
download_parser.set_defaults(func=UpperCAmelCase_ )
def lowerCAmelCase ( self : List[Any] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = huggingface_hub.__version__
snake_case_ = """not installed"""
snake_case_ = """NA"""
if is_torch_available():
import torch
snake_case_ = torch.__version__
snake_case_ = torch.cuda.is_available()
snake_case_ = """not installed"""
if is_transformers_available():
import transformers
snake_case_ = transformers.__version__
snake_case_ = """not installed"""
if is_accelerate_available():
import accelerate
snake_case_ = accelerate.__version__
snake_case_ = """not installed"""
if is_xformers_available():
import xformers
snake_case_ = xformers.__version__
snake_case_ = {
"""`diffusers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""PyTorch version (GPU?)""": F"""{pt_version} ({pt_cuda_available})""",
"""Huggingface_hub version""": hub_version,
"""Transformers version""": transformers_version,
"""Accelerate version""": accelerate_version,
"""xFormers version""": xformers_version,
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(UpperCAmelCase_ ) )
return info
@staticmethod
def lowerCAmelCase ( UpperCAmelCase_ : List[str] ) ->List[Any]:
"""simple docstring"""
return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 347 |
"""simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __A (snake_case__):
'''simple docstring'''
@slow
@require_torch
def lowerCAmelCase ( self : Union[str, Any] ) ->Dict:
"""simple docstring"""
snake_case_ = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
snake_case_ = BertTokenizer.from_pretrained("""bert-base-uncased""" )
snake_case_ = bertabert.config.encoder.vocab_size
snake_case_ = tokenizer.sep_token_id
snake_case_ = tokenizer.cls_token_id
snake_case_ = 128
snake_case_ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
snake_case_ = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
snake_case_ = train_dataset.select(range(32 ) )
snake_case_ = val_dataset.select(range(16 ) )
snake_case_ = 4
def _map_to_encoder_decoder_inputs(UpperCAmelCase_ : int ):
# Tokenizer will automatically set [BOS] <text> [EOS]
snake_case_ = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=UpperCAmelCase_ , max_length=512 )
snake_case_ = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=UpperCAmelCase_ , max_length=128 )
snake_case_ = inputs.input_ids
snake_case_ = inputs.attention_mask
snake_case_ = outputs.input_ids
snake_case_ = outputs.input_ids.copy()
snake_case_ = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
snake_case_ = outputs.attention_mask
assert all(len(UpperCAmelCase_ ) == 512 for x in inputs.input_ids )
assert all(len(UpperCAmelCase_ ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(UpperCAmelCase_ : Union[str, Any] ):
snake_case_ = pred.label_ids
snake_case_ = pred.predictions
# all unnecessary tokens are removed
snake_case_ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
snake_case_ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
snake_case_ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCAmelCase_ ) )] ) / len(UpperCAmelCase_ )
return {"accuracy": accuracy}
# map train dataset
snake_case_ = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , remove_columns=["""article""", """highlights"""] , )
train_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
# same for validation dataset
snake_case_ = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , remove_columns=["""article""", """highlights"""] , )
val_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
snake_case_ = self.get_auto_remove_tmp_dir()
        snake_case_ = SeqaSeqTrainingArguments(
            output_dir=UpperCAmelCase_ ,
            per_device_train_batch_size=UpperCAmelCase_ ,
            per_device_eval_batch_size=UpperCAmelCase_ ,
            predict_with_generate=UpperCAmelCase_ ,
            evaluation_strategy="""steps""" ,
            do_train=UpperCAmelCase_ ,
            do_eval=UpperCAmelCase_ ,
            warmup_steps=0 ,
            eval_steps=2 ,
            logging_steps=2 ,
        )
# instantiate trainer
snake_case_ = SeqaSeqTrainer(
model=UpperCAmelCase_ , args=UpperCAmelCase_ , compute_metrics=_compute_metrics , train_dataset=UpperCAmelCase_ , eval_dataset=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , )
# start training
trainer.train()
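# --- Illustrative sketch, not part of the test file --------------------------------------
# The label-masking idiom from the mapping function above, in isolation: pad positions
# are set to -100 so that the cross-entropy loss ignores them. Toy values only.
def _demo_label_masking():
    pad_token_id = 0
    labels = [[5, 6, 0, 0], [7, 0, 0, 0]]
    masked = [[-100 if token == pad_token_id else token for token in seq] for seq in labels]
    print(masked)  # [[5, 6, -100, -100], [7, -100, -100, -100]]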
| 347 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class __A (unittest.TestCase):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : Dict ) ->Dict:
"""simple docstring"""
snake_case_ = AutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" , return_dict=UpperCAmelCase_ ).to(UpperCAmelCase_ )
snake_case_ = AutoTokenizer.from_pretrained("""google/mt5-small""" )
snake_case_ = tokenizer("""Hello there""" , return_tensors="""pt""" ).input_ids
snake_case_ = tokenizer("""Hi I am""" , return_tensors="""pt""" ).input_ids
snake_case_ = model(input_ids.to(UpperCAmelCase_ ) , labels=labels.to(UpperCAmelCase_ ) ).loss
snake_case_ = -(labels.shape[-1] * loss.item())
snake_case_ = -84.9_127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 347 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
__SCREAMING_SNAKE_CASE : Tuple = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
__SCREAMING_SNAKE_CASE : Tuple = subprocess.check_output(f"""git diff --name-only {fork_point_sha}""".split()).decode('utf-8').split()
__SCREAMING_SNAKE_CASE : Any = '|'.join(sys.argv[1:])
__SCREAMING_SNAKE_CASE : Optional[Any] = re.compile(Rf"""^({joined_dirs}).*?\.py$""")
__SCREAMING_SNAKE_CASE : List[str] = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
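# --- Illustrative sketch, not part of the original script --------------------------------
# What the filtering regex above does, on toy inputs: only .py files under the requested
# top-level directories survive. The real script feeds it the paths reported by git; an
# assert is used here so the script's stdout (consumed by Makefile targets) stays clean.
demo_regex = re.compile(r'^(utils|src|tests|examples).*?\.py$')
demo_files = ['src/foo.py', 'docs/readme.md', 'tests/test_x.py', 'setup.py']
assert [x for x in demo_files if demo_regex.match(x)] == ['src/foo.py', 'tests/test_x.py']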
| 347 | 1 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class __A (snake_case__):
'''simple docstring'''
def __init__( self : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] ) ->Dict:
"""simple docstring"""
snake_case_ = params
snake_case_ = np.array(UpperCAmelCase_ )
snake_case_ = np.array([len(UpperCAmelCase_ ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self : Dict , UpperCAmelCase_ : Optional[Any] ) ->List[Any]:
"""simple docstring"""
return (self.token_ids[index], self.lengths[index])
def __len__( self : Optional[int] ) ->Any:
"""simple docstring"""
return len(self.lengths )
def lowerCAmelCase ( self : str ) ->Union[str, Any]:
"""simple docstring"""
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def lowerCAmelCase ( self : Tuple ) ->Dict:
"""simple docstring"""
snake_case_ = self.params.max_model_input_size
snake_case_ = self.lengths > max_len
logger.info(F"""Splitting {sum(UpperCAmelCase_ )} too long sequences.""" )
def divide_chunks(UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict ):
return [l[i : i + n] for i in range(0 , len(UpperCAmelCase_ ) , UpperCAmelCase_ )]
snake_case_ = []
snake_case_ = []
if self.params.mlm:
snake_case_ , snake_case_ = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""]
else:
snake_case_ , snake_case_ = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
snake_case_ = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
snake_case_ = np.insert(UpperCAmelCase_ , 0 , UpperCAmelCase_ )
if sub_s[-1] != sep_id:
snake_case_ = np.insert(UpperCAmelCase_ , len(UpperCAmelCase_ ) , UpperCAmelCase_ )
assert len(UpperCAmelCase_ ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(UpperCAmelCase_ )
new_tok_ids.extend(UpperCAmelCase_ )
new_lengths.extend([len(UpperCAmelCase_ ) for l in sub_seqs] )
snake_case_ = np.array(UpperCAmelCase_ )
snake_case_ = np.array(UpperCAmelCase_ )
def lowerCAmelCase ( self : Union[str, Any] ) ->Any:
"""simple docstring"""
snake_case_ = len(self )
snake_case_ = self.lengths > 11
snake_case_ = self.token_ids[indices]
snake_case_ = self.lengths[indices]
snake_case_ = len(self )
logger.info(F"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" )
def lowerCAmelCase ( self : List[str] ) ->Optional[int]:
"""simple docstring"""
if "unk_token" not in self.params.special_tok_ids:
return
else:
snake_case_ = self.params.special_tok_ids["""unk_token"""]
snake_case_ = len(self )
snake_case_ = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
snake_case_ = (unk_occs / self.lengths) < 0.5
snake_case_ = self.token_ids[indices]
snake_case_ = self.lengths[indices]
snake_case_ = len(self )
logger.info(F"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" )
def lowerCAmelCase ( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
if not self.params.is_master:
return
logger.info(F"""{len(self )} sequences""" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def lowerCAmelCase ( self : int , UpperCAmelCase_ : str ) ->List[str]:
"""simple docstring"""
snake_case_ = [t[0] for t in batch]
snake_case_ = [t[1] for t in batch]
assert len(UpperCAmelCase_ ) == len(UpperCAmelCase_ )
# Max for paddings
snake_case_ = max(UpperCAmelCase_ )
# Pad token ids
if self.params.mlm:
snake_case_ = self.params.special_tok_ids["""pad_token"""]
else:
snake_case_ = self.params.special_tok_ids["""unk_token"""]
snake_case_ = [list(t.astype(UpperCAmelCase_ ) ) + [pad_idx] * (max_seq_len_ - len(UpperCAmelCase_ )) for t in token_ids]
assert len(tk_ ) == len(UpperCAmelCase_ )
assert all(len(UpperCAmelCase_ ) == max_seq_len_ for t in tk_ )
snake_case_ = torch.tensor(tk_ ) # (bs, max_seq_len_)
snake_case_ = torch.tensor(UpperCAmelCase_ ) # (bs)
return tk_t, lg_t
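# Hypothetical usage sketch (not in the original file). Upstream, this dataset
# (named LmSeqsDataset there) is paired with a torch DataLoader, with the final
# method above (`batch_sequences` upstream) as the collate_fn; `params` is
# assumed to expose `max_model_input_size`, `mlm`, `is_master` and a
# `special_tok_ids` dict, as the methods above require.
#
#   from torch.utils.data import DataLoader
#
#   dataset = LmSeqsDataset(params, token_id_arrays)
#   loader = DataLoader(dataset, batch_size=32, collate_fn=dataset.batch_sequences)
#   token_ids, lengths = next(iter(loader))  # (bs, max_seq_len_), (bs,)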
| 347 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Tuple = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
__SCREAMING_SNAKE_CASE : List[Any] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]:
snake_case_ = {}
with open(_SCREAMING_SNAKE_CASE , """r""" ) as file:
for line_number, line in enumerate(_SCREAMING_SNAKE_CASE ):
snake_case_ = line.strip()
if line:
snake_case_ = line.split()
snake_case_ = line_number
snake_case_ = words[0]
snake_case_ = value
return result
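# Illustrative note (format inferred from the code above): `_a` maps each line
# number to the line's first whitespace-separated token, e.g. a label file
# containing "happy 0\nsad 1" yields {0: 'happy', 1: 'sad'}.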
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
for attribute in key.split(""".""" ):
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_SCREAMING_SNAKE_CASE ):
snake_case_ = PARAM_MAPPING[full_name.split(""".""" )[-1]]
snake_case_ = """param"""
if weight_type is not None and weight_type != "param":
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape
elif weight_type is not None and weight_type == "param":
snake_case_ = hf_pointer
for attribute in hf_param_name.split(""".""" ):
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = shape_pointer.shape
# let's reduce dimension
snake_case_ = value[0]
else:
snake_case_ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
snake_case_ = value
elif weight_type == "weight_g":
snake_case_ = value
elif weight_type == "weight_v":
snake_case_ = value
elif weight_type == "bias":
snake_case_ = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = value
else:
snake_case_ = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
snake_case_ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_SCREAMING_SNAKE_CASE ):
snake_case_ = PARAM_MAPPING[full_name.split(""".""" )[-1]]
snake_case_ = """param"""
if weight_type is not None and weight_type != "param":
snake_case_ = """.""".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
snake_case_ = """.""".join([key, hf_param_name] )
else:
snake_case_ = key
snake_case_ = value if """lm_head""" in full_key else value[0]
__SCREAMING_SNAKE_CASE : int = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> List[str]:
snake_case_ = False
for key, mapped_key in MAPPING.items():
snake_case_ = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
snake_case_ = True
if "*" in mapped_key:
snake_case_ = name.split(_SCREAMING_SNAKE_CASE )[0].split(""".""" )[-2]
snake_case_ = mapped_key.replace("""*""" , _SCREAMING_SNAKE_CASE )
if "weight_g" in name:
snake_case_ = """weight_g"""
elif "weight_v" in name:
snake_case_ = """weight_v"""
elif "bias" in name:
snake_case_ = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case_ = """weight"""
else:
snake_case_ = None
if hf_dict is not None:
rename_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return is_used
return is_used
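# Clarifying note: MAPPING keys containing "*" stand for a per-layer index. For
# example, the fairseq name "encoder.layers.3.fc1.weight" resolves above to the
# HF pointer "wav2vec2.encoder.layers.3.feed_forward.intermediate_dense" with
# weight_type "weight".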
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
snake_case_ = []
snake_case_ = fairseq_model.state_dict()
snake_case_ = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
snake_case_ = False
if "conv_layers" in name:
load_conv_layer(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == """group""" , )
snake_case_ = True
else:
snake_case_ = load_wavaveca_layer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if not is_used:
unused_weights.append(_SCREAMING_SNAKE_CASE )
logger.warning(f"""Unused weights: {unused_weights}""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
snake_case_ = full_name.split("""conv_layers.""" )[-1]
snake_case_ = name.split(""".""" )
snake_case_ = int(items[0] )
snake_case_ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
snake_case_ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
snake_case_ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
snake_case_ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
snake_case_ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_SCREAMING_SNAKE_CASE )
@torch.no_grad()
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False ) -> int:
if config_path is not None:
snake_case_ = WavaVecaConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
else:
snake_case_ = WavaVecaConfig()
if is_seq_class:
snake_case_ = read_txt_into_dict(_SCREAMING_SNAKE_CASE )
snake_case_ = idalabel
snake_case_ = WavaVecaForSequenceClassification(_SCREAMING_SNAKE_CASE )
snake_case_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , )
feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE )
elif is_finetuned:
if dict_path:
snake_case_ = Dictionary.load(_SCREAMING_SNAKE_CASE )
        # IMPORTANT: change the bos & pad token ids, since the CTC blank symbol
        # is <pad> and not <s> as in fairseq
snake_case_ = target_dict.pad_index
snake_case_ = target_dict.bos_index
snake_case_ = target_dict.eos_index
snake_case_ = len(target_dict.symbols )
snake_case_ = os.path.join(_SCREAMING_SNAKE_CASE , """vocab.json""" )
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_SCREAMING_SNAKE_CASE ) )
return
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
snake_case_ = target_dict.indices
# fairseq has the <pad> and <s> switched
snake_case_ = 0
snake_case_ = 1
with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = WavaVecaCTCTokenizer(
_SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_SCREAMING_SNAKE_CASE , )
snake_case_ = True if config.feat_extract_norm == """layer""" else False
snake_case_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , )
snake_case_ = WavaVecaProcessor(feature_extractor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
processor.save_pretrained(_SCREAMING_SNAKE_CASE )
snake_case_ = WavaVecaForCTC(_SCREAMING_SNAKE_CASE )
else:
snake_case_ = WavaVecaForPreTraining(_SCREAMING_SNAKE_CASE )
if is_finetuned or is_seq_class:
snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
snake_case_ = argparse.Namespace(task="""audio_pretraining""" )
snake_case_ = fairseq.tasks.setup_task(_SCREAMING_SNAKE_CASE )
snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_SCREAMING_SNAKE_CASE )
snake_case_ = model[0].eval()
recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , not is_finetuned )
hf_wavavec.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
__SCREAMING_SNAKE_CASE : Any = parser.parse_args()
__SCREAMING_SNAKE_CASE : List[Any] = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
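    # Example invocation (file names and paths are placeholders, not from the
    # original script):
    #
    #   python convert_wav2vec2_checkpoint.py \
    #       --checkpoint_path wav2vec_small.pt \
    #       --pytorch_dump_folder_path ./wav2vec2-base \
    #       --not_finetuned
    #
    # Omitting --not_finetuned converts a fine-tuned CTC checkpoint instead; in
    # that case --dict_path should point at the fairseq dictionary used for
    # fine-tuning.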
| 347 | 1 |
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
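# Illustrative use of one of the helpers re-exported above (assumes the
# `accelerate` package is installed; not part of this module):
#
#   from accelerate.utils import find_executable_batch_size
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def training_loop(batch_size):
#       ...  # automatically retried with a smaller batch size on CUDA OOM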
| 347 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class __A :
'''simple docstring'''
def __init__( self : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any=14 , UpperCAmelCase_ : Union[str, Any]=7 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : str=99 , UpperCAmelCase_ : Union[str, Any]=32 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : str=37 , UpperCAmelCase_ : Any="gelu" , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : int=512 , UpperCAmelCase_ : Tuple=0.02 , ) ->List[str]:
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = rotary_dim
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = initializer_range
snake_case_ = None
snake_case_ = vocab_size - 1
snake_case_ = vocab_size - 1
snake_case_ = vocab_size - 1
def lowerCAmelCase ( self : int ) ->Optional[int]:
"""simple docstring"""
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_input_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=UpperCAmelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def lowerCAmelCase ( self : Dict ) ->Tuple:
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def lowerCAmelCase ( self : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict ) ->Tuple:
"""simple docstring"""
snake_case_ = 20
snake_case_ = model_class_name(UpperCAmelCase_ )
snake_case_ = model.init_cache(input_ids.shape[0] , UpperCAmelCase_ )
snake_case_ = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
snake_case_ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
snake_case_ = model(
input_ids[:, :-1] , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , )
snake_case_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
snake_case_ = model(
input_ids[:, -1:] , attention_mask=UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=UpperCAmelCase_ , )
snake_case_ = model(UpperCAmelCase_ )
snake_case_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) ->Dict:
"""simple docstring"""
snake_case_ = 20
snake_case_ = model_class_name(UpperCAmelCase_ )
snake_case_ = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
snake_case_ = model.init_cache(input_ids.shape[0] , UpperCAmelCase_ )
snake_case_ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
snake_case_ = model(
input_ids[:, :-1] , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , )
snake_case_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
snake_case_ = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , )
snake_case_ = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )
snake_case_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
@require_flax
class __A (snake_case__ , snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Any = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
__lowercase: List[str] = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def lowerCAmelCase ( self : Tuple ) ->List[str]:
"""simple docstring"""
snake_case_ = FlaxGPTJModelTester(self )
def lowerCAmelCase ( self : int ) ->List[Any]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : List[str] ) ->Any:
"""simple docstring"""
for model_class_name in self.all_model_classes:
snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
@tooslow
def lowerCAmelCase ( self : List[str] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" )
snake_case_ = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ )
snake_case_ = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" )
snake_case_ = False
snake_case_ = model.config.eos_token_id
snake_case_ = jax.jit(model.generate )
snake_case_ = jit_generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences
snake_case_ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
snake_case_ = [
"""Hello this is a long string of text.\n\nI'm trying to get the text of the""",
"""Hey, I'm a little late to the party. I'm going to""",
]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
@is_pt_flax_cross_test
def lowerCAmelCase ( self : int ) ->str:
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
snake_case_ = model_class.__name__[4:] # Skip the "Flax" at the beginning
snake_case_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ , snake_case_ = pt_inputs["""input_ids"""].shape
snake_case_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCAmelCase_ ):
snake_case_ = 0
snake_case_ = 1
snake_case_ = 0
snake_case_ = 1
snake_case_ = pt_model_class(UpperCAmelCase_ ).eval()
snake_case_ = model_class(UpperCAmelCase_ , dtype=jnp.floataa )
snake_case_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCAmelCase_ )
snake_case_ = fx_state
with torch.no_grad():
snake_case_ = pt_model(**UpperCAmelCase_ ).to_tuple()
snake_case_ = fx_model(**UpperCAmelCase_ ).to_tuple()
self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(UpperCAmelCase_ )
snake_case_ = model_class.from_pretrained(UpperCAmelCase_ , from_pt=UpperCAmelCase_ )
snake_case_ = fx_model_loaded(**UpperCAmelCase_ ).to_tuple()
self.assertEqual(
len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def lowerCAmelCase ( self : List[Any] ) ->str:
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
snake_case_ = model_class.__name__[4:] # Skip the "Flax" at the beginning
snake_case_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = pt_model_class(UpperCAmelCase_ ).eval()
snake_case_ = model_class(UpperCAmelCase_ , dtype=jnp.floataa )
snake_case_ = load_flax_weights_in_pytorch_model(UpperCAmelCase_ , fx_model.params )
snake_case_ , snake_case_ = pt_inputs["""input_ids"""].shape
snake_case_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCAmelCase_ ):
snake_case_ = 0
snake_case_ = 1
snake_case_ = 0
snake_case_ = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
snake_case_ = pt_model(**UpperCAmelCase_ ).to_tuple()
snake_case_ = fx_model(**UpperCAmelCase_ ).to_tuple()
self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(UpperCAmelCase_ )
snake_case_ = pt_model_class.from_pretrained(UpperCAmelCase_ , from_flax=UpperCAmelCase_ )
with torch.no_grad():
snake_case_ = pt_model_loaded(**UpperCAmelCase_ ).to_tuple()
self.assertEqual(
len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def lowerCAmelCase ( self : List[Any] ) ->str:
"""simple docstring"""
for model_class_name in self.all_model_classes:
snake_case_ = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" )
snake_case_ = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase_ )
| 347 | 1 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class __A :
'''simple docstring'''
def __init__( self : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any=14 , UpperCAmelCase_ : Union[str, Any]=7 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : str=99 , UpperCAmelCase_ : Union[str, Any]=32 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : str=37 , UpperCAmelCase_ : Any="gelu" , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : int=512 , UpperCAmelCase_ : Tuple=0.02 , ) ->List[str]:
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = rotary_dim
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = initializer_range
snake_case_ = None
snake_case_ = vocab_size - 1
snake_case_ = vocab_size - 1
snake_case_ = vocab_size - 1
def lowerCAmelCase ( self : int ) ->Optional[int]:
"""simple docstring"""
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_input_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=UpperCAmelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def lowerCAmelCase ( self : Dict ) ->Tuple:
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def lowerCAmelCase ( self : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict ) ->Tuple:
"""simple docstring"""
snake_case_ = 20
snake_case_ = model_class_name(UpperCAmelCase_ )
snake_case_ = model.init_cache(input_ids.shape[0] , UpperCAmelCase_ )
snake_case_ = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
snake_case_ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
snake_case_ = model(
input_ids[:, :-1] , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , )
snake_case_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
snake_case_ = model(
input_ids[:, -1:] , attention_mask=UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=UpperCAmelCase_ , )
snake_case_ = model(UpperCAmelCase_ )
snake_case_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) ->Dict:
"""simple docstring"""
snake_case_ = 20
snake_case_ = model_class_name(UpperCAmelCase_ )
snake_case_ = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
snake_case_ = model.init_cache(input_ids.shape[0] , UpperCAmelCase_ )
snake_case_ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
snake_case_ = model(
input_ids[:, :-1] , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , )
snake_case_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
snake_case_ = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , )
snake_case_ = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )
snake_case_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
@require_flax
class __A (snake_case__ , snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Any = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
__lowercase: List[str] = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def lowerCAmelCase ( self : Tuple ) ->List[str]:
"""simple docstring"""
snake_case_ = FlaxGPTJModelTester(self )
def lowerCAmelCase ( self : int ) ->List[Any]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : List[str] ) ->Any:
"""simple docstring"""
for model_class_name in self.all_model_classes:
snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
@tooslow
def lowerCAmelCase ( self : List[str] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" )
snake_case_ = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ )
snake_case_ = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" )
snake_case_ = False
snake_case_ = model.config.eos_token_id
snake_case_ = jax.jit(model.generate )
snake_case_ = jit_generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences
snake_case_ = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
snake_case_ = [
"""Hello this is a long string of text.\n\nI'm trying to get the text of the""",
"""Hey, I'm a little late to the party. I'm going to""",
]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
@is_pt_flax_cross_test
def lowerCAmelCase ( self : int ) ->str:
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
snake_case_ = model_class.__name__[4:] # Skip the "Flax" at the beginning
snake_case_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ , snake_case_ = pt_inputs["""input_ids"""].shape
snake_case_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCAmelCase_ ):
snake_case_ = 0
snake_case_ = 1
snake_case_ = 0
snake_case_ = 1
snake_case_ = pt_model_class(UpperCAmelCase_ ).eval()
snake_case_ = model_class(UpperCAmelCase_ , dtype=jnp.floataa )
snake_case_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCAmelCase_ )
snake_case_ = fx_state
with torch.no_grad():
snake_case_ = pt_model(**UpperCAmelCase_ ).to_tuple()
snake_case_ = fx_model(**UpperCAmelCase_ ).to_tuple()
self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(UpperCAmelCase_ )
snake_case_ = model_class.from_pretrained(UpperCAmelCase_ , from_pt=UpperCAmelCase_ )
snake_case_ = fx_model_loaded(**UpperCAmelCase_ ).to_tuple()
self.assertEqual(
len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def lowerCAmelCase ( self : List[Any] ) ->str:
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
snake_case_ = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
snake_case_ = model_class.__name__[4:] # Skip the "Flax" at the beginning
snake_case_ = getattr(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = pt_model_class(UpperCAmelCase_ ).eval()
snake_case_ = model_class(UpperCAmelCase_ , dtype=jnp.floataa )
snake_case_ = load_flax_weights_in_pytorch_model(UpperCAmelCase_ , fx_model.params )
snake_case_ , snake_case_ = pt_inputs["""input_ids"""].shape
snake_case_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCAmelCase_ ):
snake_case_ = 0
snake_case_ = 1
snake_case_ = 0
snake_case_ = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
snake_case_ = pt_model(**UpperCAmelCase_ ).to_tuple()
snake_case_ = fx_model(**UpperCAmelCase_ ).to_tuple()
self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(UpperCAmelCase_ )
snake_case_ = pt_model_class.from_pretrained(UpperCAmelCase_ , from_flax=UpperCAmelCase_ )
with torch.no_grad():
snake_case_ = pt_model_loaded(**UpperCAmelCase_ ).to_tuple()
self.assertEqual(
len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def lowerCAmelCase ( self : List[Any] ) ->str:
"""simple docstring"""
for model_class_name in self.all_model_classes:
snake_case_ = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" )
snake_case_ = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase_ )
| 347 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
__SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
class __A (snake_case__):
'''simple docstring'''
__lowercase: int = """upernet"""
def __init__( self : str , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : str=512 , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : Optional[Any]=[1, 2, 3, 6] , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Tuple=0.4 , UpperCAmelCase_ : Tuple=384 , UpperCAmelCase_ : Union[str, Any]=256 , UpperCAmelCase_ : str=1 , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : Tuple=255 , **UpperCAmelCase_ : Dict , ) ->Union[str, Any]:
"""simple docstring"""
super().__init__(**UpperCAmelCase_ )
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
snake_case_ = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = backbone_config.get("""model_type""" )
snake_case_ = CONFIG_MAPPING[backbone_model_type]
snake_case_ = config_class.from_dict(UpperCAmelCase_ )
snake_case_ = backbone_config
snake_case_ = hidden_size
snake_case_ = initializer_range
snake_case_ = pool_scales
snake_case_ = use_auxiliary_head
snake_case_ = auxiliary_loss_weight
snake_case_ = auxiliary_in_channels
snake_case_ = auxiliary_channels
snake_case_ = auxiliary_num_convs
snake_case_ = auxiliary_concat_input
snake_case_ = loss_ignore_index
def lowerCAmelCase ( self : str ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = copy.deepcopy(self.__dict__ )
snake_case_ = self.backbone_config.to_dict()
snake_case_ = self.__class__.model_type
return output
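# Minimal usage sketch (assumes a transformers version that ships UperNet; not
# part of the original module):
#
#   from transformers import UperNetConfig, UperNetForSemanticSegmentation
#
#   config = UperNetConfig()                        # default ResNet backbone
#   model = UperNetForSemanticSegmentation(config)  # randomly initialised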
| 347 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __A (snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Optional[Any] = DebertaTokenizer
__lowercase: Union[str, Any] = True
__lowercase: List[Any] = DebertaTokenizerFast
def lowerCAmelCase ( self : List[Any] ) ->List[str]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case_ = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""[UNK]""",
]
snake_case_ = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
snake_case_ = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
snake_case_ = {"""unk_token""": """[UNK]"""}
snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCAmelCase_ ) )
def lowerCAmelCase ( self : Any , **UpperCAmelCase_ : Any ) ->Optional[Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : str ) ->Optional[int]:
"""simple docstring"""
snake_case_ = """lower newer"""
snake_case_ = """lower newer"""
return input_text, output_text
def lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = self.get_tokenizer()
snake_case_ = """lower newer"""
snake_case_ = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
snake_case_ = tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = tokens + [tokenizer.unk_token]
snake_case_ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
"""simple docstring"""
snake_case_ = self.get_tokenizer()
snake_case_ = tokenizer("""Hello""" , """World""" )
snake_case_ = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd["""token_type_ids"""] , UpperCAmelCase_ )
@slow
def lowerCAmelCase ( self : Optional[Any] ) ->Dict:
"""simple docstring"""
snake_case_ = self.tokenizer_class.from_pretrained("""microsoft/deberta-base""" )
snake_case_ = tokenizer.encode("""sequence builders""" , add_special_tokens=UpperCAmelCase_ )
snake_case_ = tokenizer.encode("""multi-sequence build""" , add_special_tokens=UpperCAmelCase_ )
snake_case_ = tokenizer.encode(
"""sequence builders""" , add_special_tokens=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ )
snake_case_ = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ )
snake_case_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
snake_case_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def lowerCAmelCase ( self : int ) ->List[str]:
"""simple docstring"""
snake_case_ = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
snake_case_ = tokenizer_class.from_pretrained("""microsoft/deberta-base""" )
snake_case_ = [
"""ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
"""ALBERT incorporates two parameter reduction techniques""",
"""The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
""" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
""" vocabulary embedding.""",
]
snake_case_ = tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ )
snake_case_ = [tokenizer.decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) for seq in encoding["""input_ids"""]]
# fmt: off
snake_case_ = {
"""input_ids""": [
[1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2]
],
"""token_type_ids""": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
snake_case_ = [
"""ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
"""ALBERT incorporates two parameter reduction techniques""",
"""The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
""" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
""" vocabulary embedding.""",
]
self.assertDictEqual(encoding.data , UpperCAmelCase_ )
for expected, decoded in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
| 347 |
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __A (unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[int] ) ->Dict:
"""simple docstring"""
snake_case_ = """ylacombe/bark-small"""
snake_case_ = tempfile.mkdtemp()
snake_case_ = """en_speaker_1"""
snake_case_ = """This is a test string"""
snake_case_ = """speaker_embeddings_path.json"""
snake_case_ = """speaker_embeddings"""
def lowerCAmelCase ( self : List[str] , **UpperCAmelCase_ : str ) ->Optional[int]:
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase_ )
def lowerCAmelCase ( self : Any ) ->Dict:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase ( self : List[Any] ) ->Dict:
"""simple docstring"""
snake_case_ = self.get_tokenizer()
snake_case_ = BarkProcessor(tokenizer=UpperCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
snake_case_ = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def lowerCAmelCase ( self : Dict ) ->int:
"""simple docstring"""
snake_case_ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
snake_case_ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
snake_case_ = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def lowerCAmelCase ( self : Optional[Any] ) ->Any:
"""simple docstring"""
snake_case_ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
snake_case_ = 35
snake_case_ = 2
snake_case_ = 8
snake_case_ = {
"""semantic_prompt""": np.ones(UpperCAmelCase_ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
snake_case_ = processor(text=self.input_string , voice_preset=UpperCAmelCase_ )
snake_case_ = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
snake_case_ = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(UpperCAmelCase_ , **UpperCAmelCase_ )
snake_case_ = processor(text=self.input_string , voice_preset=UpperCAmelCase_ )
snake_case_ = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
snake_case_ = processor(text=self.input_string , voice_preset=self.voice_preset )
def lowerCAmelCase ( self : Tuple ) ->Dict:
"""simple docstring"""
snake_case_ = self.get_tokenizer()
snake_case_ = BarkProcessor(tokenizer=UpperCAmelCase_ )
snake_case_ = processor(text=self.input_string )
snake_case_ = tokenizer(
self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 347 | 1 |
"""simple docstring"""
import sys
from collections import defaultdict
class __A :
'''simple docstring'''
def __init__( self : List[str] ) ->Optional[int]:
"""simple docstring"""
snake_case_ = []
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : int ) ->List[Any]:
"""simple docstring"""
return self.node_position[vertex]
def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = pos
def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
snake_case_ = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
snake_case_ = 2 * start + 1
else:
snake_case_ = 2 * start + 2
if heap[smallest_child] < heap[start]:
snake_case_ , snake_case_ = heap[smallest_child], positions[smallest_child]
snake_case_ , snake_case_ = (
heap[start],
positions[start],
)
snake_case_ , snake_case_ = temp, tempa
snake_case_ = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , UpperCAmelCase_ )
self.top_to_bottom(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] ) ->str:
"""simple docstring"""
snake_case_ = position[index]
while index != 0:
snake_case_ = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
snake_case_ = heap[parent]
snake_case_ = position[parent]
self.set_position(position[parent] , UpperCAmelCase_ )
else:
snake_case_ = val
snake_case_ = temp
self.set_position(UpperCAmelCase_ , UpperCAmelCase_ )
break
snake_case_ = parent
else:
snake_case_ = val
snake_case_ = temp
self.set_position(UpperCAmelCase_ , 0 )
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any] ) ->int:
"""simple docstring"""
snake_case_ = len(UpperCAmelCase_ ) // 2 - 1
for i in range(UpperCAmelCase_ , -1 , -1 ):
self.top_to_bottom(UpperCAmelCase_ , UpperCAmelCase_ , len(UpperCAmelCase_ ) , UpperCAmelCase_ )
def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = positions[0]
snake_case_ = sys.maxsize
self.top_to_bottom(UpperCAmelCase_ , 0 , len(UpperCAmelCase_ ) , UpperCAmelCase_ )
return temp
def _a ( _SCREAMING_SNAKE_CASE ) -> Any:
snake_case_ = Heap()
snake_case_ = [0] * len(_SCREAMING_SNAKE_CASE )
snake_case_ = [-1] * len(_SCREAMING_SNAKE_CASE ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
snake_case_ = [] # Heap of Distance of vertices from their neighboring vertex
snake_case_ = []
for vertex in range(len(_SCREAMING_SNAKE_CASE ) ):
distance_tv.append(sys.maxsize )
positions.append(_SCREAMING_SNAKE_CASE )
heap.node_position.append(_SCREAMING_SNAKE_CASE )
snake_case_ = []
snake_case_ = 1
snake_case_ = sys.maxsize
for neighbor, distance in adjacency_list[0]:
snake_case_ = 0
snake_case_ = distance
heap.heapify(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for _ in range(1 , len(_SCREAMING_SNAKE_CASE ) ):
snake_case_ = heap.delete_minimum(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
snake_case_ = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(_SCREAMING_SNAKE_CASE )]
):
snake_case_ = distance
heap.bottom_to_top(
_SCREAMING_SNAKE_CASE , heap.get_position(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
__SCREAMING_SNAKE_CASE : Dict = int(input('Enter number of edges: ').strip())
__SCREAMING_SNAKE_CASE : Optional[Any] = defaultdict(list)
for _ in range(edges_number):
__SCREAMING_SNAKE_CASE : Dict = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
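# The Heap class above hand-rolls a binary heap; a self-contained reference
# version of Prim's algorithm over the same {vertex: [[neighbor, weight], ...]}
# adjacency shape can use the standard-library heapq instead. The 4-vertex
# graph below is illustrative.
import heapq
def _prims_reference(adj: dict) -> list:
    start = next(iter(adj))
    visited = {start}
    frontier = [(weight, start, nbr) for nbr, weight in adj[start]]
    heapq.heapify(frontier)
    tree_edges = []
    while frontier and len(visited) < len(adj):
        weight, u, v = heapq.heappop(frontier)
        if v in visited:
            continue
        visited.add(v)
        tree_edges.append((u, v))
        for nbr, w in adj[v]:
            if nbr not in visited:
                heapq.heappush(frontier, (w, v, nbr))
    return tree_edges
# Minimum spanning tree of this graph: (0, 2), (2, 1), (1, 3).
_demo_graph = {0: [[1, 4], [2, 1]], 1: [[0, 4], [2, 2], [3, 5]], 2: [[0, 1], [1, 2]], 3: [[1, 5]]}
assert _prims_reference(_demo_graph) == [(0, 2), (2, 1), (1, 3)]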
| 347 |
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
__SCREAMING_SNAKE_CASE : int = sys.version_info >= (3, 10)
def _a ( _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple:
return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE )
@dataclass
class __A :
'''simple docstring'''
__lowercase: int
__lowercase: float
__lowercase: str
__lowercase: bool
@dataclass
class __A :
'''simple docstring'''
__lowercase: int = 42
__lowercase: str = field(default="""toto""" , metadata={"""help""": """help message"""})
@dataclass
class __A :
'''simple docstring'''
__lowercase: bool = False
__lowercase: bool = True
__lowercase: Optional[bool] = None
class __A (snake_case__):
'''simple docstring'''
__lowercase: str = """titi"""
__lowercase: Any = """toto"""
class __A (snake_case__):
'''simple docstring'''
__lowercase: int = """titi"""
__lowercase: Optional[Any] = """toto"""
__lowercase: List[Any] = 42
@dataclass
class __A :
'''simple docstring'''
__lowercase: BasicEnum = "toto"
def lowerCAmelCase ( self : int ) ->List[Any]:
"""simple docstring"""
snake_case_ = BasicEnum(self.foo )
@dataclass
class __A :
'''simple docstring'''
__lowercase: MixedTypeEnum = "toto"
def lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = MixedTypeEnum(self.foo )
@dataclass
class __A :
'''simple docstring'''
__lowercase: Optional[int] = None
__lowercase: Optional[float] = field(default=snake_case__ , metadata={"""help""": """help message"""})
__lowercase: Optional[str] = None
__lowercase: Optional[List[str]] = list_field(default=[])
__lowercase: Optional[List[int]] = list_field(default=[])
@dataclass
class __A :
'''simple docstring'''
__lowercase: List[int] = list_field(default=[])
__lowercase: List[int] = list_field(default=[1, 2, 3])
__lowercase: List[str] = list_field(default=["""Hallo""", """Bonjour""", """Hello"""])
__lowercase: List[float] = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class __A :
'''simple docstring'''
__lowercase: List[int] = field()
__lowercase: str = field()
__lowercase: BasicEnum = field()
def lowerCAmelCase ( self : Any ) ->str:
"""simple docstring"""
snake_case_ = BasicEnum(self.required_enum )
@dataclass
class __A :
'''simple docstring'''
__lowercase: int
__lowercase: "BasicEnum" = field()
__lowercase: "Optional[bool]" = None
__lowercase: "str" = field(default="""toto""" , metadata={"""help""": """help message"""})
__lowercase: "List[str]" = list_field(default=["""Hallo""", """Bonjour""", """Hello"""])
if is_python_no_less_than_3_10:
@dataclass
class __A :
'''simple docstring'''
__lowercase: bool = False
__lowercase: bool = True
__lowercase: bool | None = None
@dataclass
class __A :
'''simple docstring'''
__lowercase: int | None = None
__lowercase: float | None = field(default=snake_case__ , metadata={"""help""": """help message"""})
__lowercase: str | None = None
__lowercase: list[str] | None = list_field(default=[])
__lowercase: list[int] | None = list_field(default=[])
class __A (unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : argparse.ArgumentParser , UpperCAmelCase_ : argparse.ArgumentParser ) ->Optional[int]:
"""simple docstring"""
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
snake_case_ = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k != """container"""}
snake_case_ = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k != """container"""}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("""choices""" , UpperCAmelCase_ ) and yy.get("""choices""" , UpperCAmelCase_ ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["""type"""](UpperCAmelCase_ ) , yy["""type"""](UpperCAmelCase_ ) )
del xx["type"], yy["type"]
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : int ) ->Any:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ )
expected.add_argument("""--bar""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ )
expected.add_argument("""--baz""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ )
expected.add_argument("""--flag""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""]
((snake_case_) , ) = parser.parse_args_into_dataclasses(UpperCAmelCase_ , look_for_args_file=UpperCAmelCase_ )
self.assertFalse(example.flag )
def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=42 , type=UpperCAmelCase_ )
expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase_ , help="""help message""" )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" )
expected.add_argument("""--baz""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , const=UpperCAmelCase_ , nargs="""?""" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("""--no_baz""" , action="""store_false""" , default=UpperCAmelCase_ , dest="""baz""" )
expected.add_argument("""--opt""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ )
snake_case_ = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(UpperCAmelCase_ )
for dataclass_type in dataclass_types:
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = parser.parse_args([] )
self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) )
snake_case_ = parser.parse_args(["""--foo""", """--no_baz"""] )
self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) )
snake_case_ = parser.parse_args(["""--foo""", """--baz"""] )
self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) )
snake_case_ = parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] )
self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) )
snake_case_ = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] )
self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , baz=UpperCAmelCase_ , opt=UpperCAmelCase_ ) )
def lowerCAmelCase ( self : int ) ->List[str]:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
snake_case_ = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
snake_case_ = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
snake_case_ = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
snake_case_ = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
snake_case_ = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def lowerCAmelCase ( self : Dict ) ->str:
"""simple docstring"""
@dataclass
class __A :
'''simple docstring'''
__lowercase: Literal["titi", "toto", 42] = "toto"
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
snake_case_ = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
snake_case_ = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
def lowerCAmelCase ( self : Optional[int] ) ->Dict:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = argparse.ArgumentParser()
expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ )
expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=UpperCAmelCase_ )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase_ )
expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=UpperCAmelCase_ )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = parser.parse_args([] )
self.assertEqual(
UpperCAmelCase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , )
snake_case_ = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() )
self.assertEqual(UpperCAmelCase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) )
def lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
"""simple docstring"""
snake_case_ = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ )
expected.add_argument("""--bar""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , help="""help message""" )
expected.add_argument("""--baz""" , default=UpperCAmelCase_ , type=UpperCAmelCase_ )
expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ )
expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=UpperCAmelCase_ )
snake_case_ = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(UpperCAmelCase_ )
for dataclass_type in dataclass_types:
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = parser.parse_args([] )
self.assertEqual(UpperCAmelCase_ , Namespace(foo=UpperCAmelCase_ , bar=UpperCAmelCase_ , baz=UpperCAmelCase_ , ces=[] , des=[] ) )
snake_case_ = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() )
self.assertEqual(UpperCAmelCase_ , Namespace(foo=12 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) )
def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = argparse.ArgumentParser()
expected.add_argument("""--required_list""" , nargs="""+""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ )
expected.add_argument("""--required_str""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase_ , )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=UpperCAmelCase_ , required=UpperCAmelCase_ )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=UpperCAmelCase_ , )
expected.add_argument("""--opt""" , type=UpperCAmelCase_ , default=UpperCAmelCase_ )
expected.add_argument("""--baz""" , default="""toto""" , type=UpperCAmelCase_ , help="""help message""" )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=UpperCAmelCase_ )
self.argparsersEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : Dict ) ->Tuple:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
snake_case_ = parser.parse_dict(UpperCAmelCase_ )[0]
snake_case_ = BasicExample(**UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[Any] ) ->List[Any]:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
"""extra""": 42,
}
self.assertRaises(UpperCAmelCase_ , parser.parse_dict , UpperCAmelCase_ , allow_extra_keys=UpperCAmelCase_ )
def lowerCAmelCase ( self : Any ) ->Dict:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = os.path.join(UpperCAmelCase_ , """temp_json""" )
os.mkdir(UpperCAmelCase_ )
with open(temp_local_path + """.json""" , """w+""" ) as f:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
            snake_case_ = parser.parse_json_file(Path(temp_local_path + """.json""" ) )[0]
snake_case_ = BasicExample(**UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
snake_case_ = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = os.path.join(UpperCAmelCase_ , """temp_yaml""" )
os.mkdir(UpperCAmelCase_ )
with open(temp_local_path + """.yaml""" , """w+""" ) as f:
yaml.dump(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0]
snake_case_ = BasicExample(**UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCAmelCase ( self : Dict ) ->Any:
"""simple docstring"""
snake_case_ = HfArgumentParser(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
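# A minimal sketch of the API under test: HfArgumentParser derives argparse
# arguments from a dataclass and parses straight into instances. The dataclass
# and the CLI values below are illustrative.
@dataclass
class _DemoArgs:
    foo: int
    bar: float = 0.5
    baz: str = field(default="toto", metadata={"help": "help message"})
def _hf_argparser_demo() -> _DemoArgs:
    parser = HfArgumentParser(_DemoArgs)
    (demo_args,) = parser.parse_args_into_dataclasses(args=["--foo", "1", "--bar", "3.14"])
    return demo_args  # _DemoArgs(foo=1, bar=3.14, baz="toto")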
| 347 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
__SCREAMING_SNAKE_CASE : Dict = list[list[float | int]]
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Matrix:
snake_case_ = len(_SCREAMING_SNAKE_CASE )
snake_case_ = [[0 for _ in range(size + 1 )] for _ in range(_SCREAMING_SNAKE_CASE )]
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
for row in range(_SCREAMING_SNAKE_CASE ):
for col in range(_SCREAMING_SNAKE_CASE ):
snake_case_ = matrix[row][col]
snake_case_ = vector[row][0]
snake_case_ = 0
snake_case_ = 0
while row < size and col < size:
# pivoting
snake_case_ = max((abs(augmented[rowa][col] ), rowa) for rowa in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
snake_case_ , snake_case_ = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , _SCREAMING_SNAKE_CASE ):
snake_case_ = augmented[rowa][col] / augmented[row][col]
snake_case_ = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , _SCREAMING_SNAKE_CASE ):
for row in range(_SCREAMING_SNAKE_CASE ):
snake_case_ = augmented[row][col] / augmented[col][col]
for cola in range(_SCREAMING_SNAKE_CASE , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(_SCREAMING_SNAKE_CASE )
]
def _a ( _SCREAMING_SNAKE_CASE ) -> Callable[[int], int]:
snake_case_ = len(_SCREAMING_SNAKE_CASE )
snake_case_ = [[0 for _ in range(_SCREAMING_SNAKE_CASE )] for _ in range(_SCREAMING_SNAKE_CASE )]
snake_case_ = [[0] for _ in range(_SCREAMING_SNAKE_CASE )]
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
for x_val, y_val in enumerate(_SCREAMING_SNAKE_CASE ):
for col in range(_SCREAMING_SNAKE_CASE ):
snake_case_ = (x_val + 1) ** (size - col - 1)
snake_case_ = y_val
snake_case_ = solve(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def interpolated_func(_SCREAMING_SNAKE_CASE ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(_SCREAMING_SNAKE_CASE ) )
return interpolated_func
def _a ( _SCREAMING_SNAKE_CASE ) -> int:
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def _a ( _SCREAMING_SNAKE_CASE = question_function , _SCREAMING_SNAKE_CASE = 10 ) -> int:
snake_case_ = [func(_SCREAMING_SNAKE_CASE ) for x_val in range(1 , order + 1 )]
snake_case_ = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
snake_case_ = 0
snake_case_ = 42
snake_case_ = 42
for poly in polynomials:
snake_case_ = 1
while func(_SCREAMING_SNAKE_CASE ) == poly(_SCREAMING_SNAKE_CASE ):
x_val += 1
ret += poly(_SCREAMING_SNAKE_CASE )
return ret
if __name__ == "__main__":
print(f"""{solution() = }""")
| 347 |
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__)
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , ) -> Optional[Any]:
snake_case_ = bnb_quantization_config.load_in_abit
snake_case_ = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"""You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"""You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
"""make sure you have the latest version of `bitsandbytes` installed.""" )
snake_case_ = []
# custom device map
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(device_map.keys() ) > 1:
snake_case_ = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
snake_case_ = get_keys_to_not_convert(_SCREAMING_SNAKE_CASE )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(_SCREAMING_SNAKE_CASE )
snake_case_ = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
snake_case_ = []
snake_case_ = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(_SCREAMING_SNAKE_CASE )
# compatibility with peft
snake_case_ = load_in_abit
snake_case_ = load_in_abit
snake_case_ = get_parameter_device(_SCREAMING_SNAKE_CASE )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
snake_case_ = replace_with_bnb_layers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE )
# convert param to the right dtype
snake_case_ = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
snake_case_ = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" )
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(_SCREAMING_SNAKE_CASE ):
param.to(_SCREAMING_SNAKE_CASE )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
f"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
"""We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
with init_empty_weights():
snake_case_ = replace_with_bnb_layers(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , modules_to_not_convert=_SCREAMING_SNAKE_CASE )
snake_case_ = get_quantized_model_device_map(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , max_memory=_SCREAMING_SNAKE_CASE , no_split_module_classes=_SCREAMING_SNAKE_CASE , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
snake_case_ = True
snake_case_ = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=bnb_quantization_config.torch_dtype , offload_folder=_SCREAMING_SNAKE_CASE , offload_state_dict=_SCREAMING_SNAKE_CASE , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(_SCREAMING_SNAKE_CASE , device_map=_SCREAMING_SNAKE_CASE , offload_dir=_SCREAMING_SNAKE_CASE )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple:
if device_map is None:
if torch.cuda.is_available():
snake_case_ = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
snake_case_ = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
snake_case_ = {}
snake_case_ = special_dtypes
snake_case_ = no_split_module_classes
snake_case_ = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
snake_case_ = get_balanced_memory(
_SCREAMING_SNAKE_CASE , low_zero=(device_map == """balanced_low_0""") , max_memory=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
snake_case_ = max_memory
snake_case_ = infer_auto_device_map(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
# check if don't have any quantized module on the cpu
snake_case_ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
snake_case_ = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> Tuple:
if modules_to_not_convert is None:
snake_case_ = []
snake_case_ , snake_case_ = _replace_with_bnb_layers(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ) -> List[Any]:
snake_case_ = False
for name, module in model.named_children():
if current_key_name is None:
snake_case_ = []
current_key_name.append(_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
snake_case_ = """.""".join(_SCREAMING_SNAKE_CASE )
snake_case_ = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
snake_case_ = False
break
if proceed:
                # Load bnb module with empty weight and replace `nn.Linear` module
if bnb_quantization_config.load_in_abit:
snake_case_ = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_SCREAMING_SNAKE_CASE , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
snake_case_ = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
snake_case_ = module.weight.data
if module.bias is not None:
snake_case_ = module.bias.data
bnb_module.requires_grad_(_SCREAMING_SNAKE_CASE )
setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = True
if len(list(module.children() ) ) > 0:
snake_case_ , snake_case_ = _replace_with_bnb_layers(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def _a ( _SCREAMING_SNAKE_CASE ) -> Any:
# Create a copy of the model
with init_empty_weights():
        snake_case_ = deepcopy(_SCREAMING_SNAKE_CASE )  # this has 0 cost since it is done inside `init_empty_weights` context manager
snake_case_ = find_tied_parameters(_SCREAMING_SNAKE_CASE )
# For compatibility with Accelerate < 0.18
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
snake_case_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
snake_case_ = sum(_SCREAMING_SNAKE_CASE , [] )
snake_case_ = len(_SCREAMING_SNAKE_CASE ) > 0
# Check if it is a base model
snake_case_ = False
if hasattr(_SCREAMING_SNAKE_CASE , """base_model_prefix""" ):
snake_case_ = not hasattr(_SCREAMING_SNAKE_CASE , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
snake_case_ = list(model.named_children() )
snake_case_ = [list_modules[-1][0]]
# add last module together with tied weights
snake_case_ = set(_SCREAMING_SNAKE_CASE ) - set(_SCREAMING_SNAKE_CASE )
snake_case_ = list(set(_SCREAMING_SNAKE_CASE ) ) + list(_SCREAMING_SNAKE_CASE )
# remove ".weight" from the keys
snake_case_ = [""".weight""", """.bias"""]
snake_case_ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
snake_case_ = name.replace(_SCREAMING_SNAKE_CASE , """""" )
filtered_module_names.append(_SCREAMING_SNAKE_CASE )
return filtered_module_names
def _a ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
for m in model.modules():
if isinstance(_SCREAMING_SNAKE_CASE , bnb.nn.Linearabit ):
return True
return False
def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[int]:
return next(parameter.parameters() ).device
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 0 , dtype=_SCREAMING_SNAKE_CASE , value=_SCREAMING_SNAKE_CASE )
snake_case_ = param_name
snake_case_ = model
if "." in tensor_name:
snake_case_ = tensor_name.split(""".""" )
for split in splits[:-1]:
snake_case_ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if new_module is None:
raise ValueError(f"""{module} has no attribute {split}.""" )
snake_case_ = new_module
snake_case_ = splits[-1]
# offload weights
snake_case_ = False
offload_weight(module._parameters[tensor_name] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE )
if hasattr(module._parameters[tensor_name] , """SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE , )
else:
offload_weight(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE )
offload_weight(_SCREAMING_SNAKE_CASE , param_name.replace("""weight""" , """SCB""" ) , _SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE )
set_module_tensor_to_device(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """meta""" , dtype=_SCREAMING_SNAKE_CASE , value=torch.empty(*param.size() ) )
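# A minimal usage sketch of the public entry point above, following the flow
# from the Accelerate quantization guide linked in the error message; the
# checkpoint path is illustrative and a CUDA device is required at call time.
def _quantize_demo(weights_location: str = "path/to/checkpoint"):
    from accelerate import init_empty_weights
    from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
    from transformers import AutoConfig, AutoModelForCausalLM

    config = AutoConfig.from_pretrained(weights_location)
    with init_empty_weights():
        empty_model = AutoModelForCausalLM.from_config(config)
    bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
    return load_and_quantize_model(
        empty_model,
        weights_location=weights_location,
        bnb_quantization_config=bnb_config,
        device_map="auto",
    )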
| 347 | 1 |
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE ) -> bool:
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
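# The bitwise test above relies on the least-significant bit encoding parity;
# a quick cross-check against the modulo definition over a small range:
assert all(((n & 1) == 0) == (n % 2 == 0) for n in range(-16, 17))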
| 347 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Tuple = {
'microsoft/beit-base-patch16-224-pt22k': (
'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class __A (snake_case__):
'''simple docstring'''
__lowercase: Optional[int] = """beit"""
def __init__( self : List[str] , UpperCAmelCase_ : List[Any]=8_192 , UpperCAmelCase_ : Dict=768 , UpperCAmelCase_ : int=12 , UpperCAmelCase_ : Tuple=12 , UpperCAmelCase_ : List[Any]=3_072 , UpperCAmelCase_ : Tuple="gelu" , UpperCAmelCase_ : Dict=0.0 , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : Optional[Any]=1E-12 , UpperCAmelCase_ : int=224 , UpperCAmelCase_ : Tuple=16 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Dict=[3, 5, 7, 11] , UpperCAmelCase_ : Tuple=[1, 2, 3, 6] , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : List[Any]=0.4 , UpperCAmelCase_ : Optional[Any]=256 , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : int=False , UpperCAmelCase_ : Tuple=255 , **UpperCAmelCase_ : List[str] , ) ->Optional[Any]:
"""simple docstring"""
super().__init__(**UpperCAmelCase_ )
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = use_mask_token
snake_case_ = use_absolute_position_embeddings
snake_case_ = use_relative_position_bias
snake_case_ = use_shared_relative_position_bias
snake_case_ = layer_scale_init_value
snake_case_ = drop_path_rate
snake_case_ = use_mean_pooling
# decode head attributes (semantic segmentation)
snake_case_ = out_indices
snake_case_ = pool_scales
# auxiliary head attributes (semantic segmentation)
snake_case_ = use_auxiliary_head
snake_case_ = auxiliary_loss_weight
snake_case_ = auxiliary_channels
snake_case_ = auxiliary_num_convs
snake_case_ = auxiliary_concat_input
snake_case_ = semantic_loss_ignore_index
class __A (snake_case__):
'''simple docstring'''
__lowercase: List[Any] = version.parse("""1.11""")
@property
def lowerCAmelCase ( self : Dict ) ->Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase ( self : Any ) ->float:
"""simple docstring"""
return 1E-4
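# A minimal sketch of instantiating the released counterpart of the config
# class above with a couple of overridden fields; the values are illustrative.
def _beit_config_demo():
    from transformers import BeitConfig

    config = BeitConfig(image_size=384, use_relative_position_bias=True)
    return config.image_size, config.num_hidden_layers  # (384, 12)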
| 347 | 1 |
"""simple docstring"""
import argparse
from collections import defaultdict
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
snake_case_ = f"""{file}_{class_name}_{test_name}"""
done_test[_id] += 1
with open(_SCREAMING_SNAKE_CASE , """r""" ) as f:
snake_case_ = f.readlines()
snake_case_ = f"""class {class_name}("""
snake_case_ = f"""{4 * " "}def {test_name}("""
snake_case_ = f"""{8 * " "}{correct_line.split()[0]}"""
snake_case_ = f"""{16 * " "}{correct_line.split()[0]}"""
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = 0
snake_case_ = 0
snake_case_ = []
for line in lines:
if line.startswith(_SCREAMING_SNAKE_CASE ):
snake_case_ = True
elif in_class and line.startswith(_SCREAMING_SNAKE_CASE ):
snake_case_ = True
elif in_class and in_func and (line.startswith(_SCREAMING_SNAKE_CASE ) or line.startswith(_SCREAMING_SNAKE_CASE )):
snake_case_ = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
snake_case_ = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
snake_case_ = True
if in_class and in_func and in_line and insert_line:
new_lines.append(f"""{spaces * " "}{correct_line}""" )
snake_case_ = snake_case_ = snake_case_ = snake_case_ = False
else:
new_lines.append(_SCREAMING_SNAKE_CASE )
with open(_SCREAMING_SNAKE_CASE , """w""" ) as f:
for line in new_lines:
f.write(_SCREAMING_SNAKE_CASE )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Any:
if fail is not None:
with open(_SCREAMING_SNAKE_CASE , """r""" ) as f:
snake_case_ = {l.strip() for l in f.readlines()}
else:
snake_case_ = None
with open(_SCREAMING_SNAKE_CASE , """r""" ) as f:
snake_case_ = f.readlines()
snake_case_ = defaultdict(_SCREAMING_SNAKE_CASE )
for line in correct_lines:
snake_case_ , snake_case_ , snake_case_ , snake_case_ = line.split(""";""" )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
parser.add_argument('--correct_filename', help='filename of tests with expected result')
parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
__SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
main(args.correct_filename, args.fail_filename)
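# main() unpacks each line of --correct_filename into four ';'-separated
# fields; the entry below is hypothetical but shows the expected shape.
_example_entry = "tests/test_demo.py;DemoTest;test_value;self.assertEqual(x, 1)"
assert len(_example_entry.split(";")) == 4  # file;class_name;test_name;correct_line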
| 347 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : List[Any] = {'vocab_file': 'spiece.model'}
__SCREAMING_SNAKE_CASE : int = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
}
}
# TODO(PVP) - this should be removed in Transformers v5
__SCREAMING_SNAKE_CASE : Dict = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
__SCREAMING_SNAKE_CASE : Optional[int] = '▁'
class __A (snake_case__):
'''simple docstring'''
__lowercase: Optional[int] = VOCAB_FILES_NAMES
__lowercase: Any = PRETRAINED_VOCAB_FILES_MAP
__lowercase: Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase: List[str] = ["""input_ids""", """attention_mask"""]
def __init__( self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any]="</s>" , UpperCAmelCase_ : Optional[Any]="<unk>" , UpperCAmelCase_ : Any="<pad>" , UpperCAmelCase_ : Tuple=100 , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , UpperCAmelCase_ : Optional[int]=True , **UpperCAmelCase_ : Dict , ) ->None:
"""simple docstring"""
if extra_ids > 0 and additional_special_tokens is None:
snake_case_ = [F"""<extra_id_{i}>""" for i in range(UpperCAmelCase_ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
snake_case_ = len(set(filter(lambda UpperCAmelCase_ : bool("""extra_id""" in str(UpperCAmelCase_ ) ) , UpperCAmelCase_ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
""" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
""" tokens""" )
if legacy:
logger.warning_once(
F"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"""
""" read the related pull request available at https://github.com/huggingface/transformers/pull/24565""" )
snake_case_ = legacy
snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , extra_ids=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , legacy=UpperCAmelCase_ , **UpperCAmelCase_ , )
snake_case_ = vocab_file
snake_case_ = extra_ids
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCAmelCase_ )
@staticmethod
def lowerCAmelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] ) ->Union[str, Any]:
"""simple docstring"""
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
snake_case_ = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"""This tokenizer was incorrectly instantiated with a model max length of"""
F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
""" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"""
""" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"""
F""" {pretrained_model_name_or_path} automatically truncating your input to"""
F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
""" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"""
""" instantiate this tokenizer with `model_max_length` set to your preferred value.""" , UpperCAmelCase_ , )
return max_model_length
@property
def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]:
"""simple docstring"""
return self.sp_model.get_piece_size() + self._extra_ids
def lowerCAmelCase ( self : Any ) ->Optional[int]:
"""simple docstring"""
snake_case_ = {self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False ) ->List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(UpperCAmelCase_ )) + [1]
return ([0] * len(UpperCAmelCase_ )) + [1] + ([0] * len(UpperCAmelCase_ )) + [1]
def lowerCAmelCase ( self : Any ) ->List[str]:
"""simple docstring"""
return list(
set(filter(lambda UpperCAmelCase_ : bool(re.search(R"""<extra_id_\d+>""" , UpperCAmelCase_ ) ) is not None , self.additional_special_tokens ) ) )
def lowerCAmelCase ( self : Dict ) ->str:
"""simple docstring"""
return [self._convert_token_to_id(UpperCAmelCase_ ) for token in self.get_sentinel_tokens()]
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : List[int] ) ->List[int]:
"""simple docstring"""
if len(UpperCAmelCase_ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"""
""" eos tokens being added.""" )
return token_ids
else:
return token_ids + [self.eos_token_id]
def lowerCAmelCase ( self : str , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]:
"""simple docstring"""
snake_case_ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]:
"""simple docstring"""
snake_case_ = self._add_eos_if_not_present(UpperCAmelCase_ )
if token_ids_a is None:
return token_ids_a
else:
snake_case_ = self._add_eos_if_not_present(UpperCAmelCase_ )
return token_ids_a + token_ids_a
def __getstate__( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
snake_case_ = self.__dict__.copy()
snake_case_ = None
return state
def __setstate__( self : Optional[Any] , UpperCAmelCase_ : List[Any] ) ->List[Any]:
"""simple docstring"""
snake_case_ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
snake_case_ = {}
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase ( self : int , UpperCAmelCase_ : "TextInput" , **UpperCAmelCase_ : Tuple ) ->List[str]:
"""simple docstring"""
if not self.legacy:
snake_case_ = SPIECE_UNDERLINE + text.replace(UpperCAmelCase_ , """ """ )
return super().tokenize(UpperCAmelCase_ , **UpperCAmelCase_ )
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Any ) ->Tuple:
"""simple docstring"""
if not self.legacy:
snake_case_ = text.startswith(UpperCAmelCase_ )
if is_first:
snake_case_ = text[1:]
snake_case_ = self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ )
if not self.legacy and not is_first and not text.startswith(""" """ ) and tokens[0].startswith(UpperCAmelCase_ ):
snake_case_ = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : List[Any] ) ->Tuple:
"""simple docstring"""
if token.startswith("""<extra_id_""" ):
snake_case_ = re.match(R"""<extra_id_(\d+)>""" , UpperCAmelCase_ )
snake_case_ = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(UpperCAmelCase_ )
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : Optional[Any] ) ->List[Any]:
"""simple docstring"""
if index < self.sp_model.get_piece_size():
snake_case_ = self.sp_model.IdToPiece(UpperCAmelCase_ )
else:
snake_case_ = F"""<extra_id_{self.vocab_size - 1 - index}>"""
return token
def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : List[str] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = []
snake_case_ = """"""
snake_case_ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCAmelCase_ ) + token
snake_case_ = True
snake_case_ = []
else:
current_sub_tokens.append(UpperCAmelCase_ )
snake_case_ = False
out_string += self.sp_model.decode(UpperCAmelCase_ )
return out_string.strip()
def lowerCAmelCase ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ) ->Tuple[str]:
"""simple docstring"""
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case_ = os.path.join(
UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase_ , """wb""" ) as fi:
snake_case_ = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase_ )
return (out_vocab_file,)
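# A minimal usage sketch of the tokenizer above via its released counterpart;
# requires network access to download the spiece.model vocabulary.
def _ta_tokenizer_demo():
    from transformers import T5Tokenizer

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    ids = tokenizer("translate English to German: hello")["input_ids"]
    assert ids[-1] == tokenizer.eos_token_id  # eos appended by build_inputs_with_special_tokens
    assert tokenizer.convert_tokens_to_ids("<extra_id_0>") == tokenizer.vocab_size - 1
    return ids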
| 347 | 1 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : List[Any] = {'vocab_file': 'spiece.model'}
__SCREAMING_SNAKE_CASE : int = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
}
}
# TODO(PVP) - this should be removed in Transformers v5
__SCREAMING_SNAKE_CASE : Dict = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
__SCREAMING_SNAKE_CASE : Optional[int] = '▁'
class __A (snake_case__):
'''simple docstring'''
__lowercase: Optional[int] = VOCAB_FILES_NAMES
__lowercase: Any = PRETRAINED_VOCAB_FILES_MAP
__lowercase: Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase: List[str] = ["""input_ids""", """attention_mask"""]
def __init__( self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any]="</s>" , UpperCAmelCase_ : Optional[Any]="<unk>" , UpperCAmelCase_ : Any="<pad>" , UpperCAmelCase_ : Tuple=100 , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , UpperCAmelCase_ : Optional[int]=True , **UpperCAmelCase_ : Dict , ) ->None:
"""simple docstring"""
if extra_ids > 0 and additional_special_tokens is None:
snake_case_ = [F"""<extra_id_{i}>""" for i in range(UpperCAmelCase_ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
snake_case_ = len(set(filter(lambda UpperCAmelCase_ : bool("""extra_id""" in str(UpperCAmelCase_ ) ) , UpperCAmelCase_ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
""" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
""" tokens""" )
if legacy:
logger.warning_once(
F"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"""
""" read the related pull request available at https://github.com/huggingface/transformers/pull/24565""" )
snake_case_ = legacy
snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , extra_ids=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , legacy=UpperCAmelCase_ , **UpperCAmelCase_ , )
snake_case_ = vocab_file
snake_case_ = extra_ids
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCAmelCase_ )
@staticmethod
def lowerCAmelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] ) ->Union[str, Any]:
"""simple docstring"""
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
snake_case_ = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"""This tokenizer was incorrectly instantiated with a model max length of"""
F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
""" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"""
""" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"""
F""" {pretrained_model_name_or_path} automatically truncating your input to"""
F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
""" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"""
""" instantiate this tokenizer with `model_max_length` set to your preferred value.""" , UpperCAmelCase_ , )
return max_model_length
@property
def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]:
"""simple docstring"""
return self.sp_model.get_piece_size() + self._extra_ids
def lowerCAmelCase ( self : Any ) ->Optional[int]:
"""simple docstring"""
snake_case_ = {self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False ) ->List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(UpperCAmelCase_ )) + [1]
return ([0] * len(UpperCAmelCase_ )) + [1] + ([0] * len(UpperCAmelCase_ )) + [1]
def lowerCAmelCase ( self : Any ) ->List[str]:
"""simple docstring"""
return list(
set(filter(lambda UpperCAmelCase_ : bool(re.search(R"""<extra_id_\d+>""" , UpperCAmelCase_ ) ) is not None , self.additional_special_tokens ) ) )
def lowerCAmelCase ( self : Dict ) ->str:
"""simple docstring"""
return [self._convert_token_to_id(UpperCAmelCase_ ) for token in self.get_sentinel_tokens()]
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : List[int] ) ->List[int]:
"""simple docstring"""
if len(UpperCAmelCase_ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"""
""" eos tokens being added.""" )
return token_ids
else:
return token_ids + [self.eos_token_id]
def lowerCAmelCase ( self : str , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]:
"""simple docstring"""
snake_case_ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]:
"""simple docstring"""
snake_case_ = self._add_eos_if_not_present(UpperCAmelCase_ )
if token_ids_a is None:
return token_ids_a
else:
snake_case_ = self._add_eos_if_not_present(UpperCAmelCase_ )
return token_ids_a + token_ids_a
def __getstate__( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
snake_case_ = self.__dict__.copy()
snake_case_ = None
return state
def __setstate__( self : Optional[Any] , UpperCAmelCase_ : List[Any] ) ->List[Any]:
"""simple docstring"""
snake_case_ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
snake_case_ = {}
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase ( self : int , UpperCAmelCase_ : "TextInput" , **UpperCAmelCase_ : Tuple ) ->List[str]:
"""simple docstring"""
if not self.legacy:
snake_case_ = SPIECE_UNDERLINE + text.replace(UpperCAmelCase_ , """ """ )
return super().tokenize(UpperCAmelCase_ , **UpperCAmelCase_ )
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Any ) ->Tuple:
"""simple docstring"""
if not self.legacy:
snake_case_ = text.startswith(UpperCAmelCase_ )
if is_first:
snake_case_ = text[1:]
snake_case_ = self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ )
if not self.legacy and not is_first and not text.startswith(""" """ ) and tokens[0].startswith(UpperCAmelCase_ ):
snake_case_ = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : List[Any] ) ->Tuple:
"""simple docstring"""
if token.startswith("""<extra_id_""" ):
snake_case_ = re.match(R"""<extra_id_(\d+)>""" , UpperCAmelCase_ )
snake_case_ = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(UpperCAmelCase_ )
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : Optional[Any] ) ->List[Any]:
"""simple docstring"""
if index < self.sp_model.get_piece_size():
snake_case_ = self.sp_model.IdToPiece(UpperCAmelCase_ )
else:
snake_case_ = F"""<extra_id_{self.vocab_size - 1 - index}>"""
return token
def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : List[str] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = []
snake_case_ = """"""
snake_case_ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCAmelCase_ ) + token
snake_case_ = True
snake_case_ = []
else:
current_sub_tokens.append(UpperCAmelCase_ )
snake_case_ = False
out_string += self.sp_model.decode(UpperCAmelCase_ )
return out_string.strip()
def lowerCAmelCase ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ) ->Tuple[str]:
"""simple docstring"""
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case_ = os.path.join(
UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase_ , """wb""" ) as fi:
snake_case_ = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase_ )
return (out_vocab_file,)
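# Worked example of the sentinel-token index math in the two conversion
# methods above (illustrative only, not part of the original file): with
# a 32_000-piece SentencePiece model and 100 extra ids, vocab_size is
# 32_100, so "<extra_id_0>" maps to id 32_099 and "<extra_id_99>" to id
# 32_000 via `vocab_size - num - 1`; any id at or above the plain piece
# count is mapped back to an "<extra_id_...>" token when decoding.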
| 347 |
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE = 1_000_000 ) -> int:
snake_case_ = [i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , _SCREAMING_SNAKE_CASE ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
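# Hand-checked example for the totient sieve above (the function is
# defined as `_a` but invoked as `solution` at the entry point): with
# limit = 10 the phi values for 2..10 are 1, 2, 2, 4, 2, 6, 4, 6, 4,
# so the returned sum is 31.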
| 347 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : str = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[int] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
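# Note on the lazy-module pattern above (a description of the standard
# transformers convention, not stated in this file): nothing from the
# torch-backed modeling file is imported at module load time; the
# _LazyModule proxy resolves names such as SwiftFormerModel on first
# attribute access, and the TYPE_CHECKING branch exists purely so static
# type checkers can see the real symbols.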
| 347 |
"""simple docstring"""
from __future__ import annotations
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
print(f"""Vertex\tShortest Distance from vertex {src}""" )
for i, d in enumerate(_SCREAMING_SNAKE_CASE ):
print(f"""{i}\t\t{d}""" )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
for j in range(_SCREAMING_SNAKE_CASE ):
snake_case_ , snake_case_ , snake_case_ = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
return True
return False
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> list[float]:
snake_case_ = [float("""inf""" )] * vertex_count
snake_case_ = 0.0
for _ in range(vertex_count - 1 ):
for j in range(_SCREAMING_SNAKE_CASE ):
snake_case_ , snake_case_ , snake_case_ = (graph[j][k] for k in ["""src""", """dst""", """weight"""])
if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]:
snake_case_ = distance[u] + w
snake_case_ = check_negative_cycle(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if negative_cycle_exists:
raise Exception("""Negative cycle found""" )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
__SCREAMING_SNAKE_CASE : int = int(input('Enter number of vertices: ').strip())
__SCREAMING_SNAKE_CASE : Dict = int(input('Enter number of edges: ').strip())
__SCREAMING_SNAKE_CASE : list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print('Edge ', i + 1)
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = (
int(x)
for x in input('Enter source, destination, weight: ').strip().split(' ')
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = {'src': src, 'dst': dest, 'weight': weight}
__SCREAMING_SNAKE_CASE : Union[str, Any] = int(input('\nEnter shortest path source:').strip())
__SCREAMING_SNAKE_CASE : str = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
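# Non-interactive sketch of the original, un-renamed module (here the
# helpers are all defined as `_a` but called by their original names
# `bellman_ford`/`print_distance` in the prompt-driven block above):
# for the 3-vertex graph below, relaxing edges from source 0 yields
# distances [0.0, 3.0, 1.0], since 0 -> 2 -> 1 (1 + 2) beats 0 -> 1 (4).
#   example_graph = [
#       {"src": 0, "dst": 1, "weight": 4},
#       {"src": 0, "dst": 2, "weight": 1},
#       {"src": 2, "dst": 1, "weight": 2},
#   ]
#   bellman_ford(example_graph, 3, 3, 0)  # -> [0.0, 3.0, 1.0]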
| 347 | 1 |
"""simple docstring"""
from __future__ import annotations
import queue
class __A :
'''simple docstring'''
def __init__( self : Tuple , UpperCAmelCase_ : Any ) ->Optional[int]:
"""simple docstring"""
snake_case_ = data
snake_case_ = None
snake_case_ = None
def _a ( ) -> TreeNode:
print("""\n********Press N to stop entering at any point of time********\n""" )
snake_case_ = input("""Enter the value of the root node: """ ).strip().lower()
snake_case_ = queue.Queue()
snake_case_ = TreeNode(int(_SCREAMING_SNAKE_CASE ) )
q.put(_SCREAMING_SNAKE_CASE )
while not q.empty():
snake_case_ = q.get()
snake_case_ = f"""Enter the left node of {node_found.data}: """
snake_case_ = input(_SCREAMING_SNAKE_CASE ).strip().lower() or """n"""
if check == "n":
return tree_node
snake_case_ = TreeNode(int(_SCREAMING_SNAKE_CASE ) )
snake_case_ = left_node
q.put(_SCREAMING_SNAKE_CASE )
snake_case_ = f"""Enter the right node of {node_found.data}: """
snake_case_ = input(_SCREAMING_SNAKE_CASE ).strip().lower() or """n"""
if check == "n":
return tree_node
snake_case_ = TreeNode(int(_SCREAMING_SNAKE_CASE ) )
snake_case_ = right_node
q.put(_SCREAMING_SNAKE_CASE )
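    # Unreachable in practice: each loop iteration either returns or enqueues
    # more nodes; the bare raise below only satisfies the TreeNode return type.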
raise
def _a ( _SCREAMING_SNAKE_CASE ) -> None:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not node:
return
print(node.data , end=""",""" )
pre_order(node.left )
pre_order(node.right )
def _a ( _SCREAMING_SNAKE_CASE ) -> None:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not node:
return
in_order(node.left )
print(node.data , end=""",""" )
in_order(node.right )
def _a ( _SCREAMING_SNAKE_CASE ) -> None:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=""",""" )
def _a ( _SCREAMING_SNAKE_CASE ) -> None:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not node:
return
snake_case_ = queue.Queue()
q.put(_SCREAMING_SNAKE_CASE )
while not q.empty():
snake_case_ = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def _a ( _SCREAMING_SNAKE_CASE ) -> None:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not node:
return
snake_case_ = queue.Queue()
q.put(_SCREAMING_SNAKE_CASE )
while not q.empty():
snake_case_ = []
while not q.empty():
snake_case_ = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(_SCREAMING_SNAKE_CASE )
def _a ( _SCREAMING_SNAKE_CASE ) -> None:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not node:
return
snake_case_ = []
snake_case_ = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=""",""" )
stack.append(_SCREAMING_SNAKE_CASE )
snake_case_ = n.left
# end of while means current node doesn't have left child
snake_case_ = stack.pop()
# start to traverse its right child
snake_case_ = n.right
def _a ( _SCREAMING_SNAKE_CASE ) -> None:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not node:
return
snake_case_ = []
snake_case_ = node
while n or stack:
while n:
stack.append(_SCREAMING_SNAKE_CASE )
snake_case_ = n.left
snake_case_ = stack.pop()
print(n.data , end=""",""" )
snake_case_ = n.right
def _a ( _SCREAMING_SNAKE_CASE ) -> None:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not node:
return
snake_case_ , snake_case_ = [], []
snake_case_ = node
stacka.append(_SCREAMING_SNAKE_CASE )
while stacka: # to find the reversed order of post order, store it in stack2
snake_case_ = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(_SCREAMING_SNAKE_CASE )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=""",""" )
def _a ( _SCREAMING_SNAKE_CASE = "" , _SCREAMING_SNAKE_CASE=50 , _SCREAMING_SNAKE_CASE="*" ) -> str:
if not s:
return "\n" + width * char
snake_case_ , snake_case_ = divmod(width - len(_SCREAMING_SNAKE_CASE ) - 2 , 2 )
return f"""{left * char} {s} {(left + extra) * char}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('Binary Tree Traversals'))
__SCREAMING_SNAKE_CASE : TreeNode = build_tree()
print(prompt('Pre Order Traversal'))
pre_order(node)
print(prompt() + '\n')
print(prompt('In Order Traversal'))
in_order(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal'))
post_order(node)
print(prompt() + '\n')
print(prompt('Level Order Traversal'))
level_order(node)
print(prompt() + '\n')
print(prompt('Actual Level Order Traversal'))
level_order_actual(node)
print('*' * 50 + '\n')
print(prompt('Pre Order Traversal - Iteration Version'))
pre_order_iter(node)
print(prompt() + '\n')
print(prompt('In Order Traversal - Iteration Version'))
in_order_iter(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal - Iteration Version'))
post_order_iter(node)
print(prompt())
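# Hypothetical non-interactive sketch of the intended usage (the node
# class is defined above as __A, annotated elsewhere as TreeNode, and the
# traversal helpers are each defined as `_a` but invoked by their original
# names in the demo block above):
#   root = __A(1); root.left = __A(2); root.right = __A(3)
#   pre_order(root)   # prints: 1,2,3,
#   in_order(root)    # prints: 2,1,3,
#   post_order(root)  # prints: 2,3,1,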
| 347 |
"""simple docstring"""
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
__SCREAMING_SNAKE_CASE : List[str] = logging.getLogger(__name__)
__SCREAMING_SNAKE_CASE : str = tf.data.AUTOTUNE
def _a ( ) -> List[str]:
snake_case_ = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" )
parser.add_argument(
"""--pretrained_model_config""" , type=_SCREAMING_SNAKE_CASE , default="""roberta-base""" , help="""The model config to use. Note that we don't copy the model's weights, only the config!""" , )
parser.add_argument(
"""--tokenizer""" , type=_SCREAMING_SNAKE_CASE , default="""unigram-tokenizer-wikitext""" , help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""" , )
parser.add_argument(
"""--per_replica_batch_size""" , type=_SCREAMING_SNAKE_CASE , default=8 , help="""Batch size per TPU core.""" , )
parser.add_argument(
"""--no_tpu""" , action="""store_true""" , help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""" , )
parser.add_argument(
"""--tpu_name""" , type=_SCREAMING_SNAKE_CASE , help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""" , default="""local""" , )
parser.add_argument(
"""--tpu_zone""" , type=_SCREAMING_SNAKE_CASE , help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" , )
parser.add_argument(
"""--gcp_project""" , type=_SCREAMING_SNAKE_CASE , help="""Google cloud project name. Only used for non-Colab TPU nodes.""" )
parser.add_argument(
"""--bfloat16""" , action="""store_true""" , help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""" , )
parser.add_argument(
"""--train_dataset""" , type=_SCREAMING_SNAKE_CASE , help="""Path to training dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--shuffle_buffer_size""" , type=_SCREAMING_SNAKE_CASE , default=2**18 , help="""Size of the shuffle buffer (in samples)""" , )
parser.add_argument(
"""--eval_dataset""" , type=_SCREAMING_SNAKE_CASE , help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--num_epochs""" , type=_SCREAMING_SNAKE_CASE , default=1 , help="""Number of epochs to train for.""" , )
parser.add_argument(
"""--learning_rate""" , type=_SCREAMING_SNAKE_CASE , default=1E-4 , help="""Learning rate to use for training.""" , )
parser.add_argument(
"""--weight_decay_rate""" , type=_SCREAMING_SNAKE_CASE , default=1E-3 , help="""Weight decay rate to use for training.""" , )
parser.add_argument(
"""--max_length""" , type=_SCREAMING_SNAKE_CASE , default=512 , help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""" , )
parser.add_argument(
"""--mlm_probability""" , type=_SCREAMING_SNAKE_CASE , default=0.15 , help="""Fraction of tokens to mask during training.""" , )
parser.add_argument("""--output_dir""" , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help="""Path to save model checkpoints to.""" )
parser.add_argument("""--hub_model_id""" , type=_SCREAMING_SNAKE_CASE , help="""Model ID to upload to on the Hugging Face Hub.""" )
snake_case_ = parser.parse_args()
return args
def _a ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
try:
if args.tpu_name:
snake_case_ = tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
snake_case_ = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
"""Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """
"""--gcp_project. When running on a TPU VM, use --tpu_name local.""" )
tf.config.experimental_connect_to_cluster(_SCREAMING_SNAKE_CASE )
tf.tpu.experimental.initialize_tpu_system(_SCREAMING_SNAKE_CASE )
return tpu
def _a ( _SCREAMING_SNAKE_CASE ) -> List[str]:
snake_case_ = 0
for file in file_list:
snake_case_ = file.split("""/""" )[-1]
snake_case_ = re.search(r"""-\d+-(\d+)\.tfrecord""" , _SCREAMING_SNAKE_CASE ).group(1 )
snake_case_ = int(_SCREAMING_SNAKE_CASE )
num_samples += sample_count
return num_samples
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]:
snake_case_ = count_samples(_SCREAMING_SNAKE_CASE )
snake_case_ = tf.data.Dataset.from_tensor_slices(_SCREAMING_SNAKE_CASE )
if shuffle:
snake_case_ = dataset.shuffle(len(_SCREAMING_SNAKE_CASE ) )
snake_case_ = tf.data.TFRecordDataset(_SCREAMING_SNAKE_CASE , num_parallel_reads=_SCREAMING_SNAKE_CASE )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
snake_case_ = dataset.apply(tf.data.experimental.assert_cardinality(_SCREAMING_SNAKE_CASE ) )
snake_case_ = dataset.map(_SCREAMING_SNAKE_CASE , num_parallel_calls=_SCREAMING_SNAKE_CASE )
if shuffle:
assert shuffle_buffer_size is not None
snake_case_ = dataset.shuffle(args.shuffle_buffer_size )
snake_case_ = dataset.batch(_SCREAMING_SNAKE_CASE , drop_remainder=_SCREAMING_SNAKE_CASE )
snake_case_ = dataset.map(_SCREAMING_SNAKE_CASE , num_parallel_calls=_SCREAMING_SNAKE_CASE )
snake_case_ = dataset.prefetch(_SCREAMING_SNAKE_CASE )
return dataset
def _a ( _SCREAMING_SNAKE_CASE ) -> List[Any]:
if not args.no_tpu:
snake_case_ = initialize_tpu(_SCREAMING_SNAKE_CASE )
snake_case_ = tf.distribute.TPUStrategy(_SCREAMING_SNAKE_CASE )
else:
snake_case_ = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" )
if args.bfloataa:
tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" )
snake_case_ = AutoTokenizer.from_pretrained(args.tokenizer )
snake_case_ = AutoConfig.from_pretrained(args.pretrained_model_config )
snake_case_ = tokenizer.vocab_size
snake_case_ = tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) )
if not training_records:
raise ValueError(f"""No .tfrecord files found in {args.train_dataset}.""" )
snake_case_ = tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) )
if not eval_records:
raise ValueError(f"""No .tfrecord files found in {args.eval_dataset}.""" )
snake_case_ = count_samples(_SCREAMING_SNAKE_CASE )
snake_case_ = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
snake_case_ = steps_per_epoch * args.num_epochs
with strategy.scope():
snake_case_ = TFAutoModelForMaskedLM.from_config(_SCREAMING_SNAKE_CASE )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
snake_case_ , snake_case_ = create_optimizer(
num_train_steps=_SCREAMING_SNAKE_CASE , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=_SCREAMING_SNAKE_CASE , metrics=["""accuracy"""] )
def decode_fn(_SCREAMING_SNAKE_CASE ):
snake_case_ = {
"""input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
"""attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
snake_case_ = DataCollatorForLanguageModeling(
tokenizer=_SCREAMING_SNAKE_CASE , mlm_probability=args.mlm_probability , mlm=_SCREAMING_SNAKE_CASE , return_tensors="""tf""" )
def mask_with_collator(_SCREAMING_SNAKE_CASE ):
# TF really needs an isin() function
snake_case_ = (
~tf.cast(batch["""attention_mask"""] , tf.bool )
| (batch["""input_ids"""] == tokenizer.cls_token_id)
| (batch["""input_ids"""] == tokenizer.sep_token_id)
)
snake_case_ , snake_case_ = data_collator.tf_mask_tokens(
batch["""input_ids"""] , vocab_size=len(_SCREAMING_SNAKE_CASE ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=_SCREAMING_SNAKE_CASE , )
return batch
snake_case_ = args.per_replica_batch_size * strategy.num_replicas_in_sync
snake_case_ = prepare_dataset(
_SCREAMING_SNAKE_CASE , decode_fn=_SCREAMING_SNAKE_CASE , mask_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , shuffle=_SCREAMING_SNAKE_CASE , shuffle_buffer_size=args.shuffle_buffer_size , )
snake_case_ = prepare_dataset(
_SCREAMING_SNAKE_CASE , decode_fn=_SCREAMING_SNAKE_CASE , mask_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , shuffle=_SCREAMING_SNAKE_CASE , )
snake_case_ = []
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=_SCREAMING_SNAKE_CASE ) )
model.fit(
_SCREAMING_SNAKE_CASE , validation_data=_SCREAMING_SNAKE_CASE , epochs=args.num_epochs , callbacks=_SCREAMING_SNAKE_CASE , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Union[str, Any] = parse_args()
main(args)
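# Hypothetical invocation (the script name and all paths below are
# placeholders, not from the original); the flags mirror the argparse
# options defined in parse_args above:
#   python run_mlm_tpu.py \
#       --pretrained_model_config roberta-base \
#       --tokenizer unigram-tokenizer-wikitext \
#       --train_dataset gs://my-bucket/train/ \
#       --eval_dataset gs://my-bucket/eval/ \
#       --output_dir ./mlm-checkpoints \
#       --tpu_name local --bfloat16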
| 347 | 1 |
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE ) -> bool:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" )
if len(_SCREAMING_SNAKE_CASE ) == 0:
raise ValueError("""Input list must be a non empty list""" )
if len(_SCREAMING_SNAKE_CASE ) == 1:
return True
snake_case_ = series[1] - series[0]
for index in range(len(_SCREAMING_SNAKE_CASE ) - 1 ):
if series[index + 1] - series[index] != common_diff:
return False
return True
def _a ( _SCREAMING_SNAKE_CASE ) -> float:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" )
if len(_SCREAMING_SNAKE_CASE ) == 0:
raise ValueError("""Input list must be a non empty list""" )
snake_case_ = 0
for val in series:
answer += val
return answer / len(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
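# Hand-checked examples for the two helpers above (both are defined as
# `_a` here, so the second definition shadows the first; the names below
# refer to their original roles):
#   is_arithmetic_series([2, 4, 6]) -> True   (common difference 2)
#   is_arithmetic_series([2, 4, 7]) -> False  (7 - 4 != 4 - 2)
#   arithmetic_mean([2, 4, 6])      -> 4.0    ((2 + 4 + 6) / 3)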
| 347 |
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> float:
if density <= 0:
raise ValueError("""Impossible fluid density""" )
if bulk_modulus <= 0:
raise ValueError("""Impossible bulk modulus""" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
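# Worked example for c = sqrt(bulk_modulus / density) above, using
# approximate textbook values for water at room temperature (bulk
# modulus ~2.15e9 Pa, density ~998 kg/m^3):
#   sqrt(2.15e9 / 998) ~ 1_468 m/s, close to the measured ~1_480 m/s.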
| 347 | 1 |
"""simple docstring"""
from collections import Counter
from timeit import timeit
def _a ( _SCREAMING_SNAKE_CASE = "" , ) -> bool:
return sum(c % 2 for c in Counter(input_str.replace(""" """ , """""" ).lower() ).values() ) < 2
def _a ( _SCREAMING_SNAKE_CASE = "" ) -> bool:
if len(_SCREAMING_SNAKE_CASE ) == 0:
return True
snake_case_ = input_str.replace(""" """ , """""" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
snake_case_ = {}
for character in lower_case_input_str:
snake_case_ = character_freq_dict.get(_SCREAMING_SNAKE_CASE , 0 ) + 1
snake_case_ = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def _a ( _SCREAMING_SNAKE_CASE = "" ) -> None:
print("""\nFor string = """ , _SCREAMING_SNAKE_CASE , """:""" )
print(
"""> can_string_be_rearranged_as_palindrome_counter()""" , """\tans =""" , can_string_be_rearranged_as_palindrome_counter(_SCREAMING_SNAKE_CASE ) , """\ttime =""" , timeit(
"""z.can_string_be_rearranged_as_palindrome_counter(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
print(
"""> can_string_be_rearranged_as_palindrome()""" , """\tans =""" , can_string_be_rearranged_as_palindrome(_SCREAMING_SNAKE_CASE ) , """\ttime =""" , timeit(
"""z.can_string_be_rearranged_as_palindrome(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Tuple = input(
'Enter string to determine if it can be rearranged as a palindrome or not: '
).strip()
benchmark(check_str)
__SCREAMING_SNAKE_CASE : Dict = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f"""{check_str} can {'' if status else 'not '}be rearranged as a palindrome""")
| 347 |
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE ) -> bool:
if num < 0:
return False
snake_case_ = num
snake_case_ = 0
while num > 0:
snake_case_ = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
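# Hand-checked examples for the digit-reversal palindrome test above
# (the function is defined as `_a` here):
#   121  -> True   (reverses to 121)
#   123  -> False  (reverses to 321)
#   -121 -> False  (negatives are rejected before reversing)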
| 347 | 1 |
"""simple docstring"""
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
__SCREAMING_SNAKE_CASE : List[Any] = datasets.utils.logging.get_logger(__name__)
@dataclass
class __A (datasets.BuilderConfig):
'''simple docstring'''
__lowercase: Optional[datasets.Features] = None
__lowercase: str = "utf-8"
__lowercase: Optional[str] = None
__lowercase: Optional[str] = None
__lowercase: bool = True # deprecated
__lowercase: Optional[int] = None # deprecated
__lowercase: int = 10 << 20 # 10MB
__lowercase: Optional[bool] = None
class __A (datasets.ArrowBasedBuilder):
'''simple docstring'''
__lowercase: Dict = JsonConfig
def lowerCAmelCase ( self : str ) ->Any:
"""simple docstring"""
if self.config.block_size is not None:
logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" )
snake_case_ = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"""The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""" )
if self.config.newlines_in_values is not None:
raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" )
return datasets.DatasetInfo(features=self.config.features )
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : int ) ->Tuple:
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
snake_case_ = dl_manager.download_and_extract(self.config.data_files )
if isinstance(UpperCAmelCase_ , (str, list, tuple) ):
snake_case_ = data_files
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = [files]
snake_case_ = [dl_manager.iter_files(UpperCAmelCase_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
snake_case_ = []
for split_name, files in data_files.items():
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = [files]
snake_case_ = [dl_manager.iter_files(UpperCAmelCase_ ) for file in files]
splits.append(datasets.SplitGenerator(name=UpperCAmelCase_ , gen_kwargs={"""files""": files} ) )
return splits
def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : pa.Table ) ->pa.Table:
"""simple docstring"""
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
snake_case_ = self.config.features.arrow_schema.field(UpperCAmelCase_ ).type
snake_case_ = pa_table.append_column(UpperCAmelCase_ , pa.array([None] * len(UpperCAmelCase_ ) , type=UpperCAmelCase_ ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
snake_case_ = table_cast(UpperCAmelCase_ , self.config.features.arrow_schema )
return pa_table
def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : Any ) ->str:
"""simple docstring"""
for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase_ ) ):
            # If the file is a single JSON object and we need to look at the list of items in one specific field
if self.config.field is not None:
with open(UpperCAmelCase_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
snake_case_ = json.load(UpperCAmelCase_ )
# We keep only the field we are interested in
snake_case_ = dataset[self.config.field]
                # We accept two formats: a list of dicts or a dict of lists
if isinstance(UpperCAmelCase_ , (list, tuple) ):
snake_case_ = set().union(*[row.keys() for row in dataset] )
snake_case_ = {col: [row.get(UpperCAmelCase_ ) for row in dataset] for col in keys}
else:
snake_case_ = dataset
snake_case_ = pa.Table.from_pydict(UpperCAmelCase_ )
yield file_idx, self._cast_table(UpperCAmelCase_ )
# If the file has one json object per line
else:
with open(UpperCAmelCase_ , """rb""" ) as f:
snake_case_ = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
snake_case_ = max(self.config.chunksize // 32 , 16 << 10 )
snake_case_ = (
self.config.encoding_errors if self.config.encoding_errors is not None else """strict"""
)
while True:
snake_case_ = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(UpperCAmelCase_ )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
snake_case_ = batch.decode(self.config.encoding , errors=UpperCAmelCase_ ).encode("""utf-8""" )
try:
while True:
try:
snake_case_ = paj.read_json(
io.BytesIO(UpperCAmelCase_ ) , read_options=paj.ReadOptions(block_size=UpperCAmelCase_ ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(UpperCAmelCase_ , pa.ArrowInvalid )
and "straddling" not in str(UpperCAmelCase_ )
or block_size > len(UpperCAmelCase_ )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F"""Batch of {len(UpperCAmelCase_ )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
UpperCAmelCase_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
snake_case_ = json.load(UpperCAmelCase_ )
except json.JSONDecodeError:
logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): # list is the only sequence type supported in JSON
try:
snake_case_ = set().union(*[row.keys() for row in dataset] )
snake_case_ = {col: [row.get(UpperCAmelCase_ ) for row in dataset] for col in keys}
snake_case_ = pa.Table.from_pydict(UpperCAmelCase_ )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" )
raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
yield file_idx, self._cast_table(UpperCAmelCase_ )
break
else:
logger.error(F"""Failed to read file '{file}' with error {type(UpperCAmelCase_ )}: {e}""" )
raise ValueError(
F"""Not able to read records in the JSON file at {file}. """
F"""You should probably indicate the field of the JSON file containing your records. """
F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase_ )
batch_idx += 1
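# Typical entry point for this builder (standard `datasets` API; the
# file name and field below are placeholders):
#   from datasets import load_dataset
#   ds = load_dataset("json", data_files="my_records.json", field="data")
# The `field` argument selects one key of a top-level JSON object, which
# is exactly the `self.config.field` branch handled above.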
| 347 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
__SCREAMING_SNAKE_CASE : Tuple = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class __A (snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Tuple = SpeechTaTokenizer
__lowercase: int = False
__lowercase: List[str] = True
def lowerCAmelCase ( self : Any ) ->str:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ = SpeechTaTokenizer(UpperCAmelCase_ )
snake_case_ = AddedToken("""<mask>""" , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ )
snake_case_ = mask_token
tokenizer.add_special_tokens({"""mask_token""": mask_token} )
tokenizer.add_tokens(["""<ctc_blank>"""] )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) ->Dict:
"""simple docstring"""
snake_case_ = """this is a test"""
snake_case_ = """this is a test"""
return input_text, output_text
def lowerCAmelCase ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Tuple=20 , UpperCAmelCase_ : Dict=5 ) ->List[Any]:
"""simple docstring"""
snake_case_ , snake_case_ = self.get_input_output_texts(UpperCAmelCase_ )
snake_case_ = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
snake_case_ = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ )
return text, ids
def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = """<pad>"""
snake_case_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) , UpperCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) , UpperCAmelCase_ )
def lowerCAmelCase ( self : int ) ->str:
"""simple docstring"""
snake_case_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-4] , """œ""" )
self.assertEqual(vocab_keys[-2] , """<mask>""" )
self.assertEqual(vocab_keys[-1] , """<ctc_blank>""" )
self.assertEqual(len(UpperCAmelCase_ ) , 81 )
def lowerCAmelCase ( self : Optional[int] ) ->int:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
"""simple docstring"""
snake_case_ = self.get_tokenizers(do_lower_case=UpperCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
snake_case_ = tokenizer.vocab_size
snake_case_ = len(UpperCAmelCase_ )
self.assertNotEqual(UpperCAmelCase_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
snake_case_ = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
snake_case_ = tokenizer.add_tokens(UpperCAmelCase_ )
snake_case_ = tokenizer.vocab_size
snake_case_ = len(UpperCAmelCase_ )
self.assertNotEqual(UpperCAmelCase_ , 0 )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , len(UpperCAmelCase_ ) )
self.assertEqual(UpperCAmelCase_ , all_size + len(UpperCAmelCase_ ) )
snake_case_ = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=UpperCAmelCase_ )
self.assertGreaterEqual(len(UpperCAmelCase_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
snake_case_ = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
snake_case_ = tokenizer.add_special_tokens(UpperCAmelCase_ )
snake_case_ = tokenizer.vocab_size
snake_case_ = len(UpperCAmelCase_ )
self.assertNotEqual(UpperCAmelCase_ , 0 )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , len(UpperCAmelCase_ ) )
self.assertEqual(UpperCAmelCase_ , all_size_a + len(UpperCAmelCase_ ) )
snake_case_ = tokenizer.encode(
""">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=UpperCAmelCase_ )
self.assertGreaterEqual(len(UpperCAmelCase_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
pass
def lowerCAmelCase ( self : List[str] ) ->Optional[Any]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : List[str] ) ->List[str]:
"""simple docstring"""
snake_case_ = self.get_tokenizer()
snake_case_ = tokenizer.tokenize("""This is a test""" )
# fmt: off
self.assertListEqual(UpperCAmelCase_ , [SPIECE_UNDERLINE, """T""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """a""", SPIECE_UNDERLINE, """t""", """e""", """s""", """t"""] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
snake_case_ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """92000""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
snake_case_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ )
# fmt: off
self.assertListEqual(UpperCAmelCase_ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
snake_case_ = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ )
self.assertListEqual(
UpperCAmelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """<unk>""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
@slow
def lowerCAmelCase ( self : str ) ->Dict:
"""simple docstring"""
snake_case_ = [
"""Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """
"""general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural """
"""Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """
"""models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""",
"""BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """
"""conditioning on both left and right context in all layers.""",
"""The quick brown fox jumps over the lazy dog.""",
]
# fmt: off
snake_case_ = {
"""input_ids""": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ , model_name="""microsoft/speecht5_asr""" , revision="""c5ef64c71905caeccde0e4462ef3f9077224c524""" , sequences=UpperCAmelCase_ , )
| 347 | 1 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class __A (snake_case__):
'''simple docstring'''
__lowercase: jnp.ndarray
@flax_register_to_config
class __A (nn.Module , snake_case__ , snake_case__):
'''simple docstring'''
__lowercase: int = 32
__lowercase: int = 4
__lowercase: int = 4
__lowercase: Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
__lowercase: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
__lowercase: Union[bool, Tuple[bool]] = False
__lowercase: Tuple[int] = (3_20, 6_40, 12_80, 12_80)
__lowercase: int = 2
__lowercase: Union[int, Tuple[int]] = 8
__lowercase: Optional[Union[int, Tuple[int]]] = None
__lowercase: int = 12_80
__lowercase: float = 0.0
__lowercase: bool = False
__lowercase: jnp.dtype = jnp.floataa
__lowercase: bool = True
__lowercase: int = 0
__lowercase: bool = False
def lowerCAmelCase ( self : int , UpperCAmelCase_ : jax.random.KeyArray ) ->FrozenDict:
"""simple docstring"""
snake_case_ = (1, self.in_channels, self.sample_size, self.sample_size)
snake_case_ = jnp.zeros(UpperCAmelCase_ , dtype=jnp.floataa )
snake_case_ = jnp.ones((1,) , dtype=jnp.intaa )
snake_case_ = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
snake_case_ , snake_case_ = jax.random.split(UpperCAmelCase_ )
snake_case_ = {"""params""": params_rng, """dropout""": dropout_rng}
return self.init(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )["params"]
def lowerCAmelCase ( self : Any ) ->List[Any]:
"""simple docstring"""
snake_case_ = self.block_out_channels
snake_case_ = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
"""At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.""" )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
snake_case_ = self.num_attention_heads or self.attention_head_dim
# input
snake_case_ = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
snake_case_ = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
snake_case_ = FlaxTimestepEmbedding(UpperCAmelCase_ , dtype=self.dtype )
snake_case_ = self.only_cross_attention
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = (only_cross_attention,) * len(self.down_block_types )
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = (num_attention_heads,) * len(self.down_block_types )
# down
snake_case_ = []
snake_case_ = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
snake_case_ = output_channel
snake_case_ = block_out_channels[i]
snake_case_ = i == len(UpperCAmelCase_ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
snake_case_ = FlaxCrossAttnDownBlockaD(
in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
snake_case_ = FlaxDownBlockaD(
in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(UpperCAmelCase_ )
snake_case_ = down_blocks
# mid
snake_case_ = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
snake_case_ = []
snake_case_ = list(reversed(UpperCAmelCase_ ) )
snake_case_ = list(reversed(UpperCAmelCase_ ) )
snake_case_ = list(reversed(UpperCAmelCase_ ) )
snake_case_ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
snake_case_ = output_channel
snake_case_ = reversed_block_out_channels[i]
snake_case_ = reversed_block_out_channels[min(i + 1 , len(UpperCAmelCase_ ) - 1 )]
snake_case_ = i == len(UpperCAmelCase_ ) - 1
if up_block_type == "CrossAttnUpBlock2D":
snake_case_ = FlaxCrossAttnUpBlockaD(
in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , prev_output_channel=UpperCAmelCase_ , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
snake_case_ = FlaxUpBlockaD(
in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , prev_output_channel=UpperCAmelCase_ , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(UpperCAmelCase_ )
snake_case_ = output_channel
snake_case_ = up_blocks
# out
snake_case_ = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
snake_case_ = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : bool = False , ) ->Union[FlaxUNetaDConditionOutput, Tuple]:
"""simple docstring"""
if not isinstance(UpperCAmelCase_ , jnp.ndarray ):
snake_case_ = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(UpperCAmelCase_ , jnp.ndarray ) and len(timesteps.shape ) == 0:
snake_case_ = timesteps.astype(dtype=jnp.floataa )
snake_case_ = jnp.expand_dims(UpperCAmelCase_ , 0 )
snake_case_ = self.time_proj(UpperCAmelCase_ )
snake_case_ = self.time_embedding(UpperCAmelCase_ )
# 2. pre-process
snake_case_ = jnp.transpose(UpperCAmelCase_ , (0, 2, 3, 1) )
snake_case_ = self.conv_in(UpperCAmelCase_ )
# 3. down
snake_case_ = (sample,)
for down_block in self.down_blocks:
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ , snake_case_ = down_block(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , deterministic=not train )
else:
snake_case_ , snake_case_ = down_block(UpperCAmelCase_ , UpperCAmelCase_ , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
snake_case_ = ()
for down_block_res_sample, down_block_additional_residual in zip(
UpperCAmelCase_ , UpperCAmelCase_ ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
snake_case_ = new_down_block_res_samples
# 4. mid
snake_case_ = self.mid_block(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
snake_case_ = down_block_res_samples[-(self.layers_per_block + 1) :]
snake_case_ = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = up_block(
UpperCAmelCase_ , temb=UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ , res_hidden_states_tuple=UpperCAmelCase_ , deterministic=not train , )
else:
snake_case_ = up_block(UpperCAmelCase_ , temb=UpperCAmelCase_ , res_hidden_states_tuple=UpperCAmelCase_ , deterministic=not train )
# 6. post-process
snake_case_ = self.conv_norm_out(UpperCAmelCase_ )
snake_case_ = nn.silu(UpperCAmelCase_ )
snake_case_ = self.conv_out(UpperCAmelCase_ )
snake_case_ = jnp.transpose(UpperCAmelCase_ , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=UpperCAmelCase_ )
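# Shape sketch for a forward call (hedged; it follows the dummy inputs
# built in the weight-initialization method above, batch size 1 shown):
#   sample:                (1, in_channels, sample_size, sample_size)
#   timesteps:             scalar or (1,) int array
#   encoder_hidden_states: (1, sequence_length, cross_attention_dim)
# Internally the sample is transposed NCHW -> NHWC for Flax convolutions
# and transposed back before returning.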
| 347 |
"""simple docstring"""
import datasets
_CITATION = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n'
_DESCRIPTION = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'
_KWARGS_DESCRIPTION = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n'
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __A (datasets.Metric):
'''simple docstring'''
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
| 347 | 1 |
"""simple docstring"""
def is_pentagonal(n: int) -> bool:
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0
def solution(limit: int = 5_000) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
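# Why is_pentagonal works: P(n) = n(3n - 1) / 2, so 24 * P + 1 = (6n - 1) ** 2 and
# n = (1 + sqrt(1 + 24 * P)) / 6; P is pentagonal exactly when that n is a positive
# integer, which the `% 1 == 0` check tests.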
if __name__ == "__main__":
print(f"""{solution() = }""")
| 347 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
# Placeholder classes that raise a helpful ImportError when instantiated without
# the sentencepiece backend installed.
class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class __A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
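# Instantiating any of these placeholders without sentencepiece installed raises an
# ImportError from `requires_backends` that names the missing library, rather than
# failing later with an opaque ModuleNotFoundError inside the real tokenizer.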
| 347 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
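    # Shape contract exercised above: GLPNImageProcessor resizes each input so that
    # both spatial dimensions become exact multiples of `size_divisor`, which is what
    # the `% size_divisor == 0` assertions verify for PIL, NumPy and PyTorch inputs.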
| 347 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
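# The shim keeps old imports working while steering users to the new name, e.g.:
#
#     extractor = MobileViTFeatureExtractor()  # emits the FutureWarning above
#     # ...then behaves exactly like MobileViTImageProcessor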
| 347 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_blenderbot': [
        'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'BlenderbotConfig',
        'BlenderbotOnnxConfig',
    ],
    'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_blenderbot_fast'] = ['BlenderbotTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blenderbot'] = [
        'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BlenderbotForCausalLM',
        'BlenderbotForConditionalGeneration',
        'BlenderbotModel',
        'BlenderbotPreTrainedModel',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_blenderbot'] = [
        'TFBlenderbotForConditionalGeneration',
        'TFBlenderbotModel',
        'TFBlenderbotPreTrainedModel',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_blenderbot'] = [
        'FlaxBlenderbotForConditionalGeneration',
        'FlaxBlenderbotModel',
        'FlaxBlenderbotPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
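    # Design note: _LazyModule defers the heavy framework imports above until an
    # attribute is first accessed, so `import transformers.models.blenderbot` stays
    # cheap even when torch, TensorFlow and Flax are all installed.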
| 347 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
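# Example of one pair produced for i == 0 by the loop above:
#     ("blocks.0.norm1.weight", "vit.encoder.layer.0.layernorm_before.weight")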
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1_000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1_536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2_304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1_024
            config.intermediate_size = 4_096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1_280
            config.intermediate_size = 5_120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
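    # Example invocation (hypothetical script name and output directory):
    #     python convert_vit_timm_to_pytorch.py --vit_name vit_base_patch16_224 \
    #         --pytorch_dump_folder_path ./vit-base-patch16-224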
| 347 | 1 |
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    def parse_bool(string):
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
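    # Example invocation (hypothetical file paths):
    #     python convert_original_controlnet_to_diffusers.py --checkpoint_path ./control_sd15.ckpt \
    #         --original_config_file ./cldm_v15.yaml --dump_path ./controlnet-converted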
| 347 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50_000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 347 | 1 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # forward Euler predictor, then trapezoidal corrector (Heun's method)
        y_forward = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (step_size / 2) * (
            ode_func(x, y[k]) + ode_func(x + step_size, y_forward)
        )
        x += step_size
    return y
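# Usage sketch: integrate y' = y on [0, 1] with y(0) = 1 (exact value e ~ 2.71828):
#
#     y = euler_modified(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)
#     print(y[-1])  # ~2.714, close to e, as expected for this second-order method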
if __name__ == "__main__":
import doctest
doctest.testmod()
| 347 |
"""simple docstring"""
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
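# Worked example: get_failure_array("ABABX") == [0, 0, 1, 2, 0]. Each entry is the
# length of the longest proper prefix of the pattern that is also a suffix ending at
# that index; kmp() uses it to resume matching without re-scanning the text, giving
# O(len(text) + len(pattern)) overall.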
if __name__ == "__main__":
# Test 1)
    pattern = 'abc1abc12'
    text1 = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
    text2 = 'alskfjaldsk23adsfabcabc'
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = 'ABABX'
    text = 'ABABZABABYABABX'
    assert kmp(pattern, text)

    # Test 3)
    pattern = 'AAAB'
    text = 'ABAAAAAB'
    assert kmp(pattern, text)

    # Test 4)
    pattern = 'abcdabcy'
    text = 'abcxabcdabxabcdabcdabcy'
    assert kmp(pattern, text)

    # Test 5)
    pattern = 'aabaabaaa'
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 347 | 1 |