| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82-53.2k | int64 0-721 | stringlengths 91-41.9k | int64 0-699 | int64 0-1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
Wav2Vec2Config,
Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SCREAMING_SNAKE_CASE = get_tests_dir('fixtures')
SCREAMING_SNAKE_CASE = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
SCREAMING_SNAKE_CASE = get_tests_dir('fixtures/dummy-config.json')
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self ):
__a = 0
def snake_case_ ( self ):
__a = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(__A , __A )
def snake_case_ ( self ):
__a = AutoFeatureExtractor.from_pretrained(__A )
self.assertIsInstance(__A , __A )
def snake_case_ ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
__a = Wav2Vec2Config()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
__a = AutoFeatureExtractor.from_pretrained(__A ).to_dict()
config_dict.pop("""feature_extractor_type""" )
__a = Wav2Vec2FeatureExtractor(**__A )
# save in new folder
model_config.save_pretrained(__A )
config.save_pretrained(__A )
__a = AutoFeatureExtractor.from_pretrained(__A )
# make sure private variable is not incorrectly saved
__a = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(__A , __A )
def snake_case_ ( self ):
__a = AutoFeatureExtractor.from_pretrained(__A )
self.assertIsInstance(__A , __A )
def snake_case_ ( self ):
with self.assertRaisesRegex(
__A , """bert-base is not a local folder and is not a valid model identifier""" ):
__a = AutoFeatureExtractor.from_pretrained("""bert-base""" )
def snake_case_ ( self ):
with self.assertRaisesRegex(
__A , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
__a = AutoFeatureExtractor.from_pretrained(__A , revision="""aaaaaa""" )
def snake_case_ ( self ):
with self.assertRaisesRegex(
__A , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
__a = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )
def snake_case_ ( self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__A ):
__a = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__A ):
__a = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=__A )
__a = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=__A )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__A )
__a = AutoFeatureExtractor.from_pretrained(__A , trust_remote_code=__A )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
def snake_case_ ( self ):
try:
AutoConfig.register("""custom""" , __A )
AutoFeatureExtractor.register(__A , __A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__A ):
AutoFeatureExtractor.register(__A , __A )
# Now that the config is registered, it can be used as any other config with the auto-API
__a = CustomFeatureExtractor.from_pretrained(__A )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__A )
__a = AutoFeatureExtractor.from_pretrained(__A )
self.assertIsInstance(__A , __A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def snake_case_ ( self ):
class __UpperCAmelCase ( __A ):
"""simple docstring"""
_lowerCamelCase = True
try:
AutoConfig.register("""custom""" , __A )
AutoFeatureExtractor.register(__A , __A )
# If remote code is not set, the default is to use local
__a = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
__a = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=__A )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
__a = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=__A )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(not hasattr(__A , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 99 |
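The test file above exercises the transformers Auto-API registration hooks. Below is a minimal, self-contained sketch of the same register/save/reload round trip; the class names `MyConfig`/`MyFeatureExtractor` are hypothetical, and the sketch assumes the public register API behaves as the tests describe.

```python
# Hedged sketch of the custom-class registration pattern tested above.
import tempfile

from transformers import AutoConfig, AutoFeatureExtractor, PretrainedConfig
from transformers.feature_extraction_utils import FeatureExtractionMixin


class MyConfig(PretrainedConfig):
    model_type = "my-model"  # key under which the config is registered


class MyFeatureExtractor(FeatureExtractionMixin):
    pass


AutoConfig.register("my-model", MyConfig)
AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor)

# Once registered, the custom pair round-trips like any built-in one.
extractor = MyFeatureExtractor()
with tempfile.TemporaryDirectory() as tmp_dir:
    extractor.save_pretrained(tmp_dir)
    reloaded = AutoFeatureExtractor.from_pretrained(tmp_dir)
    assert isinstance(reloaded, MyFeatureExtractor)
```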
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = '▁'
SCREAMING_SNAKE_CASE = {'vocab_file': 'prophetnet.tokenizer'}
SCREAMING_SNAKE_CASE = {
'vocab_file': {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'
),
}
}
SCREAMING_SNAKE_CASE = {
'microsoft/xprophetnet-large-wiki100-cased': {'do_lower_case': False},
}
SCREAMING_SNAKE_CASE = {
'microsoft/xprophetnet-large-wiki100-cased': 5_1_2,
}
def a (lowerCAmelCase__ ):
__a = collections.OrderedDict()
with open(lowerCAmelCase__ , """r""" , encoding="""utf-8""" ) as reader:
__a = reader.readlines()
for index, token in enumerate(lowerCAmelCase__ ):
__a = token.rstrip("""\n""" )
__a = index
return vocab
class __UpperCAmelCase ( __A ):
"""simple docstring"""
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = ["""input_ids""", """attention_mask"""]
def __init__( self , __A , __A="[SEP]" , __A="[SEP]" , __A="[SEP]" , __A="[UNK]" , __A="[PAD]" , __A="[CLS]" , __A="[MASK]" , __A = None , **__A , ):
__a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__A , eos_token=__A , sep_token=__A , unk_token=__A , pad_token=__A , cls_token=__A , mask_token=__A , sp_model_kwargs=self.sp_model_kwargs , **__A , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"""You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"""
""" pip install sentencepiece""" )
raise
__a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__A ) )
__a = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
__a = {"""[PAD]""": 0, """[CLS]""": 1, """[SEP]""": 2, """[UNK]""": 3, """[MASK]""": 4}
for i in range(10 ):
__a = f'''[unused{i}]'''
__a = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
__a = 12
__a = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(__A )
def __getstate__( self ):
__a = self.__dict__.copy()
__a = None
return state
def __setstate__( self , __A ):
__a = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"""You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"""
""" pip install sentencepiece""" )
raise
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__a = {}
__a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case_ ( self , __A , __A = None , __A = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
if token_ids_a is None:
return ([0] * len(__A )) + [1]
return ([0] * len(__A )) + [1] + ([0] * len(__A )) + [1]
def snake_case_ ( self , __A , __A = None ):
__a = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def snake_case_ ( self ):
return len(self.sp_model ) + self.fairseq_offset
def snake_case_ ( self ):
__a = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case_ ( self , __A ):
return self.sp_model.encode(__A , out_type=__A )
def snake_case_ ( self , __A ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__a = self.sp_model.PieceToId(__A )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def snake_case_ ( self , __A ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def snake_case_ ( self , __A ):
__a = """""".join(__A ).replace(__A , """ """ ).strip()
return out_string
def snake_case_ ( self , __A , __A = None ):
if not os.path.isdir(__A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__a = os.path.join(
__A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __A )
elif not os.path.isfile(self.vocab_file ):
with open(__A , """wb""" ) as fi:
__a = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
def snake_case_ ( self , __A , __A = None ):
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
__a = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
| 99 | 1 |
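The vocab-alignment comment in the tokenizer above reduces to a fixed offset between SentencePiece piece ids and the fairseq-style embedding ids. A small self-contained illustration, with values taken from the table in the code:

```python
# Fairseq/SentencePiece id alignment described in the tokenizer above:
# special tokens plus ten [unused] slots occupy fairseq ids 0-14, so a
# real piece at spm id `i` maps to fairseq id `i + fairseq_offset`.
fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10):
    fairseq_tokens_to_ids[f"[unused{i}]"] = 5 + i

fairseq_offset = 12

def spm_to_fairseq(spm_id: int) -> int:
    # spm id 0 is <unk>; route it to the dedicated [UNK] slot instead
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["[UNK]"]

assert spm_to_fairseq(3) == 15  # ",": spm id 3, embedding id 15 (per the comment)
assert spm_to_fairseq(0) == 3   # unknown pieces collapse onto [UNK]
```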
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase : List[Any] = {
"configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
"tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : str = [
"LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeForMultipleChoice",
"LukeForQuestionAnswering",
"LukeForSequenceClassification",
"LukeForTokenClassification",
"LukeForMaskedLM",
"LukeModel",
"LukePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
lowercase : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 708 |
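The LUKE `__init__` above follows the transformers lazy-import convention. A pared-down sketch of the same wiring for a hypothetical one-submodule package (this belongs in a package `__init__.py`, where `__spec__` and the relative import are defined; `my_module`/`MyClass` are made-up names):

```python
# Pared-down sketch of the _LazyModule pattern used above. Nothing heavy
# is imported at package-import time; attribute access on the package
# triggers the real import of the submodule.
from typing import TYPE_CHECKING

from transformers.utils import _LazyModule

_import_structure = {"my_module": ["MyClass"]}

if TYPE_CHECKING:
    from .my_module import MyClass  # noqa: F401  -- for static analyzers only
else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__, globals()["__file__"], _import_structure, module_spec=__spec__
    )
```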
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class __A( __UpperCAmelCase ):
def _UpperCamelCase ( self ):
"""simple docstring"""
_UpperCamelCase = SMALL_MODEL_IDENTIFIER
_UpperCamelCase = '''pt'''
_UpperCamelCase = '''tf'''
def _UpperCamelCase ( self, A ):
"""simple docstring"""
_UpperCamelCase = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(A )
def _UpperCamelCase ( self, A ):
"""simple docstring"""
_UpperCamelCase = TFAutoModel.from_pretrained(self.test_model, from_pt=A )
model_tf.save_pretrained(A )
def _UpperCamelCase ( self ):
"""simple docstring"""
_UpperCamelCase = '''mock_framework'''
# Framework provided - return whatever the user provides
_UpperCamelCase = FeaturesManager.determine_framework(self.test_model, A )
self.assertEqual(A, A )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(A )
_UpperCamelCase = FeaturesManager.determine_framework(A, A )
self.assertEqual(A, A )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(A )
_UpperCamelCase = FeaturesManager.determine_framework(A, A )
self.assertEqual(A, A )
def _UpperCamelCase ( self ):
"""simple docstring"""
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(A )
_UpperCamelCase = FeaturesManager.determine_framework(A )
self.assertEqual(A, self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(A )
_UpperCamelCase = FeaturesManager.determine_framework(A )
self.assertEqual(A, self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(A ):
_UpperCamelCase = FeaturesManager.determine_framework(A )
def _UpperCamelCase ( self ):
"""simple docstring"""
_UpperCamelCase = MagicMock(return_value=A )
with patch('''transformers.onnx.features.is_tf_available''', A ):
_UpperCamelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(A, self.framework_pt )
# PyTorch not in environment -> use TensorFlow
_UpperCamelCase = MagicMock(return_value=A )
with patch('''transformers.onnx.features.is_torch_available''', A ):
_UpperCamelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(A, self.framework_tf )
# Both in environment -> use PyTorch
_UpperCamelCase = MagicMock(return_value=A )
_UpperCamelCase = MagicMock(return_value=A )
with patch('''transformers.onnx.features.is_tf_available''', A ), patch(
'''transformers.onnx.features.is_torch_available''', A ):
_UpperCamelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(A, self.framework_pt )
# Both not in environment -> raise error
_UpperCamelCase = MagicMock(return_value=A )
_UpperCamelCase = MagicMock(return_value=A )
with patch('''transformers.onnx.features.is_tf_available''', A ), patch(
'''transformers.onnx.features.is_torch_available''', A ):
with self.assertRaises(A ):
_UpperCamelCase = FeaturesManager.determine_framework(self.test_model )
| 105 | 0 |
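The comments in the ONNX test above spell out the resolution order of `FeaturesManager.determine_framework`. A plain restatement of that priority as a standalone helper (a simplified sketch, not the library's actual implementation):

```python
# Sketch of the framework-resolution priority exercised by the test above.
import os
from typing import Optional


def determine_framework_sketch(
    model: str,
    framework: Optional[str] = None,
    torch_available: bool = True,
    tf_available: bool = True,
) -> str:
    if framework is not None:
        return framework  # 1. an explicit user choice always wins
    if os.path.isdir(model):
        # 2. local checkpoint: infer the framework from the saved weights
        files = os.listdir(model)
        if "pytorch_model.bin" in files:
            return "pt"
        if "tf_model.h5" in files:
            return "tf"
        raise FileNotFoundError(f"No framework weights found in {model}")
    # 3. hub identifier: fall back to the environment, preferring PyTorch
    if torch_available:
        return "pt"
    if tf_available:
        return "tf"
    raise EnvironmentError("Neither PyTorch nor TensorFlow is installed")
```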
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
lowerCamelCase : List[str] = logging.getLogger(__name__)
lowerCamelCase : int = tf.data.AUTOTUNE
def _lowerCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =argparse.ArgumentParser(description='Train a masked language model on TPU.' )
parser.add_argument(
'--pretrained_model_config' , type=_UpperCamelCase , default='roberta-base' , help='The model config to use. Note that we don\'t copy the model\'s weights, only the config!' , )
parser.add_argument(
'--tokenizer' , type=_UpperCamelCase , default='unigram-tokenizer-wikitext' , help='The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.' , )
parser.add_argument(
'--per_replica_batch_size' , type=_UpperCamelCase , default=8 , help='Batch size per TPU core.' , )
parser.add_argument(
'--no_tpu' , action='store_true' , help='If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.' , )
parser.add_argument(
'--tpu_name' , type=_UpperCamelCase , help='Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.' , default='local' , )
parser.add_argument(
'--tpu_zone' , type=_UpperCamelCase , help='Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.' , )
parser.add_argument(
'--gcp_project' , type=_UpperCamelCase , help='Google cloud project name. Only used for non-Colab TPU nodes.' )
parser.add_argument(
'--bfloat16' , action='store_true' , help='Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.' , )
parser.add_argument(
'--train_dataset' , type=_UpperCamelCase , help='Path to training dataset to load. If the path begins with `gs://`'
' then the dataset will be loaded from a Google Cloud Storage bucket.' , )
parser.add_argument(
'--shuffle_buffer_size' , type=_UpperCamelCase , default=2**18 , help='Size of the shuffle buffer (in samples)' , )
parser.add_argument(
'--eval_dataset' , type=_UpperCamelCase , help='Path to evaluation dataset to load. If the path begins with `gs://`'
' then the dataset will be loaded from a Google Cloud Storage bucket.' , )
parser.add_argument(
'--num_epochs' , type=_UpperCamelCase , default=1 , help='Number of epochs to train for.' , )
parser.add_argument(
'--learning_rate' , type=_UpperCamelCase , default=1E-4 , help='Learning rate to use for training.' , )
parser.add_argument(
'--weight_decay_rate' , type=_UpperCamelCase , default=1E-3 , help='Weight decay rate to use for training.' , )
parser.add_argument(
'--max_length' , type=_UpperCamelCase , default=5_12 , help='Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py' , )
parser.add_argument(
'--mlm_probability' , type=_UpperCamelCase , default=0.15 , help='Fraction of tokens to mask during training.' , )
parser.add_argument('--output_dir' , type=_UpperCamelCase , required=_UpperCamelCase , help='Path to save model checkpoints to.' )
parser.add_argument('--hub_model_id' , type=_UpperCamelCase , help='Model ID to upload to on the Hugging Face Hub.' )
_SCREAMING_SNAKE_CASE =parser.parse_args()
return args
def _lowerCAmelCase ( _UpperCamelCase : Optional[int] ) -> Tuple:
"""simple docstring"""
try:
if args.tpu_name:
_SCREAMING_SNAKE_CASE =tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
_SCREAMING_SNAKE_CASE =tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
'Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or '
'--gcp_project. When running on a TPU VM, use --tpu_name local.' )
tf.config.experimental_connect_to_cluster(_UpperCamelCase )
tf.tpu.experimental.initialize_tpu_system(_UpperCamelCase )
return tpu
def _lowerCAmelCase ( _UpperCamelCase : Dict ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =0
for file in file_list:
_SCREAMING_SNAKE_CASE =file.split('/' )[-1]
_SCREAMING_SNAKE_CASE =re.search(r'-\d+-(\d+)\.tfrecord' , _UpperCamelCase ).group(1 )
_SCREAMING_SNAKE_CASE =int(_UpperCamelCase )
num_samples += sample_count
return num_samples
def _lowerCAmelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple=None ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =count_samples(_UpperCamelCase )
_SCREAMING_SNAKE_CASE =tf.data.Dataset.from_tensor_slices(_UpperCamelCase )
if shuffle:
_SCREAMING_SNAKE_CASE =dataset.shuffle(len(_UpperCamelCase ) )
_SCREAMING_SNAKE_CASE =tf.data.TFRecordDataset(_UpperCamelCase , num_parallel_reads=_UpperCamelCase )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
_SCREAMING_SNAKE_CASE =dataset.apply(tf.data.experimental.assert_cardinality(_UpperCamelCase ) )
_SCREAMING_SNAKE_CASE =dataset.map(_UpperCamelCase , num_parallel_calls=_UpperCamelCase )
if shuffle:
assert shuffle_buffer_size is not None
_SCREAMING_SNAKE_CASE =dataset.shuffle(args.shuffle_buffer_size )
_SCREAMING_SNAKE_CASE =dataset.batch(_UpperCamelCase , drop_remainder=_UpperCamelCase )
_SCREAMING_SNAKE_CASE =dataset.map(_UpperCamelCase , num_parallel_calls=_UpperCamelCase )
_SCREAMING_SNAKE_CASE =dataset.prefetch(_UpperCamelCase )
return dataset
def _lowerCAmelCase ( _UpperCamelCase : Union[str, Any] ) -> int:
"""simple docstring"""
if not args.no_tpu:
_SCREAMING_SNAKE_CASE =initialize_tpu(_UpperCamelCase )
_SCREAMING_SNAKE_CASE =tf.distribute.TPUStrategy(_UpperCamelCase )
else:
_SCREAMING_SNAKE_CASE =tf.distribute.OneDeviceStrategy(device='/gpu:0' )
if args.bfloat16:
tf.keras.mixed_precision.set_global_policy('mixed_bfloat16' )
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(args.tokenizer )
_SCREAMING_SNAKE_CASE =AutoConfig.from_pretrained(args.pretrained_model_config )
_SCREAMING_SNAKE_CASE =tokenizer.vocab_size
_SCREAMING_SNAKE_CASE =tf.io.gfile.glob(os.path.join(args.train_dataset , '*.tfrecord' ) )
if not training_records:
raise ValueError(f"No .tfrecord files found in {args.train_dataset}." )
_SCREAMING_SNAKE_CASE =tf.io.gfile.glob(os.path.join(args.eval_dataset , '*.tfrecord' ) )
if not eval_records:
raise ValueError(f"No .tfrecord files found in {args.eval_dataset}." )
_SCREAMING_SNAKE_CASE =count_samples(_UpperCamelCase )
_SCREAMING_SNAKE_CASE =num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
_SCREAMING_SNAKE_CASE =steps_per_epoch * args.num_epochs
with strategy.scope():
_SCREAMING_SNAKE_CASE =TFAutoModelForMaskedLM.from_config(_UpperCamelCase )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =create_optimizer(
num_train_steps=_UpperCamelCase , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=_UpperCamelCase , metrics=['accuracy'] )
def decode_fn(_UpperCamelCase : int ):
_SCREAMING_SNAKE_CASE ={
'input_ids': tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
'attention_mask': tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(_UpperCamelCase , _UpperCamelCase )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
_SCREAMING_SNAKE_CASE =DataCollatorForLanguageModeling(
tokenizer=_UpperCamelCase , mlm_probability=args.mlm_probability , mlm=_UpperCamelCase , return_tensors='tf' )
def mask_with_collator(_UpperCamelCase : Optional[int] ):
# TF really needs an isin() function
_SCREAMING_SNAKE_CASE =(
~tf.cast(batch['attention_mask'] , tf.bool )
| (batch['input_ids'] == tokenizer.cls_token_id)
| (batch['input_ids'] == tokenizer.sep_token_id)
)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =data_collator.tf_mask_tokens(
batch['input_ids'] , vocab_size=len(_UpperCamelCase ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=_UpperCamelCase , )
return batch
_SCREAMING_SNAKE_CASE =args.per_replica_batch_size * strategy.num_replicas_in_sync
_SCREAMING_SNAKE_CASE =prepare_dataset(
_UpperCamelCase , decode_fn=_UpperCamelCase , mask_fn=_UpperCamelCase , batch_size=_UpperCamelCase , shuffle=_UpperCamelCase , shuffle_buffer_size=args.shuffle_buffer_size , )
_SCREAMING_SNAKE_CASE =prepare_dataset(
_UpperCamelCase , decode_fn=_UpperCamelCase , mask_fn=_UpperCamelCase , batch_size=_UpperCamelCase , shuffle=_UpperCamelCase , )
_SCREAMING_SNAKE_CASE =[]
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=_UpperCamelCase ) )
model.fit(
_UpperCamelCase , validation_data=_UpperCamelCase , epochs=args.num_epochs , callbacks=_UpperCamelCase , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
lowerCamelCase : Any = parse_args()
main(args)
| 405 |
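The `count_samples` helper in the TPU training script above assumes each shard filename encodes its record count just before the `.tfrecord` extension. A quick check of that regex against a hypothetical filename:

```python
# Shard-name convention assumed by count_samples above: the number after
# the last dash is the record count. The filename below is hypothetical.
import re

filename = "wikitext-train-00003-1024.tfrecord"
match = re.search(r"-\d+-(\d+)\.tfrecord", filename)
assert match is not None and int(match.group(1)) == 1024
```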
'''simple docstring'''
from __future__ import annotations
class A__ :
def __init__( self : Optional[int] , _a : int ) -> None:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =order
# a_{0} ... a_{k}
_SCREAMING_SNAKE_CASE =[1.0] + [0.0] * order
# b_{0} ... b_{k}
_SCREAMING_SNAKE_CASE =[1.0] + [0.0] * order
# x[n-1] ... x[n-k]
_SCREAMING_SNAKE_CASE =[0.0] * self.order
# y[n-1] ... y[n-k]
_SCREAMING_SNAKE_CASE =[0.0] * self.order
def A ( self : List[Any] , _a : list[float] , _a : list[float] ) -> None:
'''simple docstring'''
if len(_a ) < self.order:
_SCREAMING_SNAKE_CASE =[1.0, *a_coeffs]
if len(_a ) != self.order + 1:
_SCREAMING_SNAKE_CASE =(
f"Expected a_coeffs to have {self.order + 1} elements "
f"for {self.order}-order filter, got {len(_a )}"
)
raise ValueError(_a )
if len(_a ) != self.order + 1:
_SCREAMING_SNAKE_CASE =(
f"Expected b_coeffs to have {self.order + 1} elements "
f"for {self.order}-order filter, got {len(_a )}"
)
raise ValueError(_a )
_SCREAMING_SNAKE_CASE =a_coeffs
_SCREAMING_SNAKE_CASE =b_coeffs
def A ( self : Union[str, Any] , _a : float ) -> float:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
_SCREAMING_SNAKE_CASE =(result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
_SCREAMING_SNAKE_CASE =self.input_history[:-1]
_SCREAMING_SNAKE_CASE =self.output_history[:-1]
_SCREAMING_SNAKE_CASE =sample
_SCREAMING_SNAKE_CASE =result
return result
| 405 | 1 |
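The processing method in the filter class above implements the direct-form-I difference equation y[n] = (b[0]*x[n] + sum_{i=1..k} (b[i]*x[n-i] - a[i]*y[n-i])) / a[0]. A numeric check with arbitrary first-order coefficients (not taken from the text):

```python
# First-order numeric check of the difference equation implemented above.
a = [1.0, -0.5]  # a0, a1
b = [0.5, 0.5]   # b0, b1
x_hist, y_hist = [0.0], [0.0]  # x[n-1], y[n-1]

def step(sample: float) -> float:
    acc = b[1] * x_hist[0] - a[1] * y_hist[0]
    result = (acc + b[0] * sample) / a[0]
    x_hist[0], y_hist[0] = sample, result  # shift histories
    return result

assert step(1.0) == 0.5   # histories start at zero: y = b0 * x / a0
assert step(0.0) == 0.75  # 0.5*1.0 - (-0.5)*0.5 = 0.75
```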
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class _a :
"""simple docstring"""
def __init__( self ) -> Any:
_SCREAMING_SNAKE_CASE = {}
def UpperCamelCase ( self , A__ ) -> List[str]:
_SCREAMING_SNAKE_CASE = {}
def UpperCamelCase ( self , A__ , A__ , A__ ) -> Any:
if nodea not in self.connections:
self.add_node(UpperCamelCase_ )
if nodea not in self.connections:
self.add_node(UpperCamelCase_ )
_SCREAMING_SNAKE_CASE = probability
def UpperCamelCase ( self ) -> Optional[int]:
return list(self.connections )
def UpperCamelCase ( self , A__ ) -> str:
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> dict[str, int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
_SCREAMING_SNAKE_CASE = Counter(graph.get_nodes() )
_SCREAMING_SNAKE_CASE = start
for _ in range(lowerCamelCase__ ):
_SCREAMING_SNAKE_CASE = graph.transition(lowerCamelCase__ )
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709 |
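The graph class and driver above amount to a weighted random walk with visit counting. A self-contained restatement with readable names (the two-state chain is made up for illustration):

```python
# Weighted random walk as implemented above: sample the next node in
# proportion to edge probability, counting visits along the way.
from collections import Counter
from random import random

connections = {"a": {"a": 0.5, "b": 0.5}, "b": {"a": 0.5, "b": 0.5}}

def transition(node: str) -> str:
    threshold, roll = 0.0, random()
    for dest, prob in connections[node].items():
        threshold += prob
        if threshold > roll:
            return dest
    return node  # numerical slack; probabilities should sum to 1

visits = Counter()
node = "a"
for _ in range(10_000):
    node = transition(node)
    visits[node] += 1
print(visits)  # roughly even, e.g. Counter({'a': 5023, 'b': 4977})
```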
'''simple docstring'''
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = XCLIPTextConfig()
# derive patch size from model name
_SCREAMING_SNAKE_CASE = model_name.find("""patch""" )
_SCREAMING_SNAKE_CASE = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] )
_SCREAMING_SNAKE_CASE = XCLIPVisionConfig(patch_size=SCREAMING_SNAKE_CASE_ , num_frames=SCREAMING_SNAKE_CASE_ )
if "large" in model_name:
_SCREAMING_SNAKE_CASE = 7_68
_SCREAMING_SNAKE_CASE = 30_72
_SCREAMING_SNAKE_CASE = 12
_SCREAMING_SNAKE_CASE = 10_24
_SCREAMING_SNAKE_CASE = 40_96
_SCREAMING_SNAKE_CASE = 16
_SCREAMING_SNAKE_CASE = 24
_SCREAMING_SNAKE_CASE = 7_68
_SCREAMING_SNAKE_CASE = 30_72
if model_name == "xclip-large-patch14-16-frames":
_SCREAMING_SNAKE_CASE = 3_36
_SCREAMING_SNAKE_CASE = XCLIPConfig.from_text_vision_configs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if "large" in model_name:
_SCREAMING_SNAKE_CASE = 7_68
return config
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Dict:
"""simple docstring"""
# text encoder
if name == "token_embedding.weight":
_SCREAMING_SNAKE_CASE = name.replace("""token_embedding.weight""" , """text_model.embeddings.token_embedding.weight""" )
if name == "positional_embedding":
_SCREAMING_SNAKE_CASE = name.replace("""positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "ln_1" in name:
_SCREAMING_SNAKE_CASE = name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
_SCREAMING_SNAKE_CASE = name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
_SCREAMING_SNAKE_CASE = name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
_SCREAMING_SNAKE_CASE = name.replace("""c_proj""" , """fc2""" )
if name.startswith("""transformer.resblocks""" ):
_SCREAMING_SNAKE_CASE = name.replace("""transformer.resblocks""" , """text_model.encoder.layers""" )
if "attn.out_proj" in name and "message" not in name:
_SCREAMING_SNAKE_CASE = name.replace("""attn.out_proj""" , """self_attn.out_proj""" )
if "ln_final" in name:
_SCREAMING_SNAKE_CASE = name.replace("""ln_final""" , """text_model.final_layer_norm""" )
# visual encoder
if name == "visual.class_embedding":
_SCREAMING_SNAKE_CASE = name.replace("""visual.class_embedding""" , """vision_model.embeddings.class_embedding""" )
if name == "visual.positional_embedding":
_SCREAMING_SNAKE_CASE = name.replace("""visual.positional_embedding""" , """vision_model.embeddings.position_embedding.weight""" )
if name.startswith("""visual.transformer.resblocks""" ):
_SCREAMING_SNAKE_CASE = name.replace("""visual.transformer.resblocks""" , """vision_model.encoder.layers""" )
if "visual.conv1" in name:
_SCREAMING_SNAKE_CASE = name.replace("""visual.conv1""" , """vision_model.embeddings.patch_embedding""" )
if "visual.ln_pre" in name:
_SCREAMING_SNAKE_CASE = name.replace("""visual.ln_pre""" , """vision_model.pre_layernorm""" )
if "visual.ln_post" in name:
_SCREAMING_SNAKE_CASE = name.replace("""visual.ln_post""" , """vision_model.post_layernorm""" )
if "visual.proj" in name:
_SCREAMING_SNAKE_CASE = name.replace("""visual.proj""" , """visual_projection.weight""" )
if "text_projection" in name:
_SCREAMING_SNAKE_CASE = name.replace("""text_projection""" , """text_projection.weight""" )
# things on top
if "prompts_visual_proj" in name:
_SCREAMING_SNAKE_CASE = name.replace("""prompts_visual_proj""" , """prompts_visual_projection""" )
if "prompts_visual_ln" in name:
_SCREAMING_SNAKE_CASE = name.replace("""prompts_visual_ln""" , """prompts_visual_layernorm""" )
# mit
if name == "mit.positional_embedding":
_SCREAMING_SNAKE_CASE = name.replace("""positional""" , """position""" )
if name.startswith("""mit.resblocks""" ):
_SCREAMING_SNAKE_CASE = name.replace("""mit.resblocks""" , """mit.encoder.layers""" )
# prompts generator
if name.startswith("""prompts_generator.norm""" ):
_SCREAMING_SNAKE_CASE = name.replace("""prompts_generator.norm""" , """prompts_generator.layernorm""" )
return name
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_SCREAMING_SNAKE_CASE = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ )
if "attn.in_proj" in key:
_SCREAMING_SNAKE_CASE = key.split(""".""" )
if key.startswith("""visual""" ):
_SCREAMING_SNAKE_CASE = key_split[3]
_SCREAMING_SNAKE_CASE = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
_SCREAMING_SNAKE_CASE = val[
:dim, :
]
_SCREAMING_SNAKE_CASE = val[
dim : dim * 2, :
]
_SCREAMING_SNAKE_CASE = val[
-dim:, :
]
else:
_SCREAMING_SNAKE_CASE = val[
:dim
]
_SCREAMING_SNAKE_CASE = val[
dim : dim * 2
]
_SCREAMING_SNAKE_CASE = val[
-dim:
]
else:
if "weight" in key:
_SCREAMING_SNAKE_CASE = val[
:dim, :
]
_SCREAMING_SNAKE_CASE = val[
dim : dim * 2, :
]
_SCREAMING_SNAKE_CASE = val[
-dim:, :
]
else:
_SCREAMING_SNAKE_CASE = val[:dim]
_SCREAMING_SNAKE_CASE = val[
dim : dim * 2
]
_SCREAMING_SNAKE_CASE = val[-dim:]
elif key.startswith("""mit""" ):
_SCREAMING_SNAKE_CASE = key_split[2]
_SCREAMING_SNAKE_CASE = config.vision_config.mit_hidden_size
if "weight" in key:
_SCREAMING_SNAKE_CASE = val[:dim, :]
_SCREAMING_SNAKE_CASE = val[dim : dim * 2, :]
_SCREAMING_SNAKE_CASE = val[-dim:, :]
else:
_SCREAMING_SNAKE_CASE = val[:dim]
_SCREAMING_SNAKE_CASE = val[dim : dim * 2]
_SCREAMING_SNAKE_CASE = val[-dim:]
else:
_SCREAMING_SNAKE_CASE = key_split[2]
_SCREAMING_SNAKE_CASE = config.text_config.hidden_size
if "weight" in key:
_SCREAMING_SNAKE_CASE = val[:dim, :]
_SCREAMING_SNAKE_CASE = val[
dim : dim * 2, :
]
_SCREAMING_SNAKE_CASE = val[-dim:, :]
else:
_SCREAMING_SNAKE_CASE = val[:dim]
_SCREAMING_SNAKE_CASE = val[
dim : dim * 2
]
_SCREAMING_SNAKE_CASE = val[-dim:]
else:
_SCREAMING_SNAKE_CASE = rename_key(SCREAMING_SNAKE_CASE_ )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
_SCREAMING_SNAKE_CASE = val.T
_SCREAMING_SNAKE_CASE = val
return orig_state_dict
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
"""simple docstring"""
if num_frames == 8:
_SCREAMING_SNAKE_CASE = """eating_spaghetti_8_frames.npy"""
elif num_frames == 16:
_SCREAMING_SNAKE_CASE = """eating_spaghetti.npy"""
elif num_frames == 32:
_SCREAMING_SNAKE_CASE = """eating_spaghetti_32_frames.npy"""
_SCREAMING_SNAKE_CASE = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename=SCREAMING_SNAKE_CASE_ , repo_type="""dataset""" , )
_SCREAMING_SNAKE_CASE = np.load(SCREAMING_SNAKE_CASE_ )
return list(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = {
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
_SCREAMING_SNAKE_CASE = model_to_url[model_name]
_SCREAMING_SNAKE_CASE = 8
if "16-frames" in model_name:
_SCREAMING_SNAKE_CASE = 16
elif "shot" in model_name:
_SCREAMING_SNAKE_CASE = 32
_SCREAMING_SNAKE_CASE = get_xclip_config(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = XCLIPModel(SCREAMING_SNAKE_CASE_ )
model.eval()
if "drive" in checkpoint_url:
_SCREAMING_SNAKE_CASE = """pytorch_model.bin"""
gdown.cached_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , quiet=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = torch.load(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" )["""model"""]
else:
_SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ )["""model"""]
_SCREAMING_SNAKE_CASE = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = XCLIPModel(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
_SCREAMING_SNAKE_CASE = 3_36 if model_name == """xclip-large-patch14-16-frames""" else 2_24
_SCREAMING_SNAKE_CASE = VideoMAEImageProcessor(size=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" )
_SCREAMING_SNAKE_CASE = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" )
_SCREAMING_SNAKE_CASE = XCLIPProcessor(image_processor=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = prepare_video(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = processor(
text=["""playing sports""", """eating spaghetti""", """go shopping"""] , videos=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , padding=SCREAMING_SNAKE_CASE_ )
print("""Shape of pixel values:""" , inputs.pixel_values.shape )
with torch.no_grad():
_SCREAMING_SNAKE_CASE = model(**SCREAMING_SNAKE_CASE_ )
# Verify outputs
_SCREAMING_SNAKE_CASE = outputs.logits_per_video
_SCREAMING_SNAKE_CASE = logits_per_video.softmax(dim=1 )
print("""Probs:""" , SCREAMING_SNAKE_CASE_ )
# kinetics-400
if model_name == "xclip-base-patch32":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
_SCREAMING_SNAKE_CASE = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]] )
elif model_name == "xclip-base-patch16":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
_SCREAMING_SNAKE_CASE = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]] )
elif model_name == "xclip-large-patch14":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
_SCREAMING_SNAKE_CASE = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
_SCREAMING_SNAKE_CASE = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
_SCREAMING_SNAKE_CASE = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]] )
else:
raise ValueError(F"Model name {model_name} not supported" )
assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
print("""Pushing model, processor and slow tokenizer files to the hub...""" )
model.push_to_hub(SCREAMING_SNAKE_CASE_ , organization="""nielsr""" )
processor.push_to_hub(SCREAMING_SNAKE_CASE_ , organization="""nielsr""" )
slow_tokenizer.push_to_hub(SCREAMING_SNAKE_CASE_ , organization="""nielsr""" )
if __name__ == "__main__":
UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the ๐ค hub."
)
UpperCamelCase__ : str = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 0 | 0 |
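The conversion loop above repeatedly slices a fused `attn.in_proj` tensor into separate query/key/value projections. The slicing pattern in isolation:

```python
# The in_proj split used throughout the conversion above: a fused
# (3*dim, dim) weight is cut into equal query/key/value thirds.
import torch

dim = 4  # toy hidden size
in_proj_weight = torch.randn(3 * dim, dim)

q_w = in_proj_weight[:dim, :]
k_w = in_proj_weight[dim : 2 * dim, :]
v_w = in_proj_weight[-dim:, :]

assert torch.equal(torch.cat([q_w, k_w, v_w]), in_proj_weight)
```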
"""simple docstring"""
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
@add_end_docstrings(A__ )
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
def __init__( self , *snake_case_ , **snake_case_ ) -> Tuple:
super().__init__(*snake_case_ , **snake_case_ )
requires_backends(self , """decord""" )
self.check_model_type(snake_case_ )
def A__ ( self , snake_case_=None , snake_case_=None , snake_case_=None ) -> Dict:
__lowerCAmelCase = {}
if frame_sampling_rate is not None:
__lowerCAmelCase = frame_sampling_rate
if num_frames is not None:
__lowerCAmelCase = num_frames
__lowerCAmelCase = {}
if top_k is not None:
__lowerCAmelCase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , snake_case_ , **snake_case_ ) -> List[str]:
return super().__call__(snake_case_ , **snake_case_ )
def A__ ( self , snake_case_ , snake_case_=None , snake_case_=1 ) -> Optional[Any]:
if num_frames is None:
__lowerCAmelCase = self.model.config.num_frames
if video.startswith("""http://""" ) or video.startswith("""https://""" ):
__lowerCAmelCase = BytesIO(requests.get(snake_case_ ).content )
__lowerCAmelCase = VideoReader(snake_case_ )
videoreader.seek(0 )
__lowerCAmelCase = 0
__lowerCAmelCase = num_frames * frame_sampling_rate - 1
__lowerCAmelCase = np.linspace(snake_case_ , snake_case_ , num=snake_case_ , dtype=np.int64 )
__lowerCAmelCase = videoreader.get_batch(snake_case_ ).asnumpy()
__lowerCAmelCase = list(snake_case_ )
__lowerCAmelCase = self.image_processor(snake_case_ , return_tensors=self.framework )
return model_inputs
def A__ ( self , snake_case_ ) -> int:
__lowerCAmelCase = self.model(**snake_case_ )
return model_outputs
def A__ ( self , snake_case_ , snake_case_=5 ) -> List[str]:
if top_k > self.model.config.num_labels:
__lowerCAmelCase = self.model.config.num_labels
if self.framework == "pt":
__lowerCAmelCase = model_outputs.logits.softmax(-1 )[0]
__lowerCAmelCase , __lowerCAmelCase = probs.topk(snake_case_ )
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
__lowerCAmelCase = scores.tolist()
__lowerCAmelCase = ids.tolist()
return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(snake_case_ , snake_case_ )]
| 465 |
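The preprocess step of the video pipeline above picks frame indices with `np.linspace` over a window of `num_frames * frame_sampling_rate` frames. The sampling in isolation:

```python
# Frame-index sampling used by the pipeline's preprocess step above.
import numpy as np

num_frames, frame_sampling_rate = 4, 2
end = num_frames * frame_sampling_rate - 1  # last candidate index: 7
indices = np.linspace(0, end, num=num_frames, dtype=np.int64)
print(indices)  # [0 2 4 7] -- evenly spread, truncated to integers
```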
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def lowercase (_lowerCAmelCase ):
if is_torch_version("""<""" , """2.0.0""" ) or not hasattr(_lowerCAmelCase , """_dynamo""" ):
return False
return isinstance(_lowerCAmelCase , torch._dynamo.eval_frame.OptimizedModule )
def lowercase (_lowerCAmelCase , _lowerCAmelCase = True ):
__lowerCAmelCase = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
__lowerCAmelCase = is_compiled_module(_lowerCAmelCase )
if is_compiled:
__lowerCAmelCase = model
__lowerCAmelCase = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = model.module
if not keep_fpaa_wrapper:
__lowerCAmelCase = getattr(_lowerCAmelCase , """forward""" )
__lowerCAmelCase = model.__dict__.pop("""_original_forward""" , _lowerCAmelCase )
if original_forward is not None:
while hasattr(_lowerCAmelCase , """__wrapped__""" ):
__lowerCAmelCase = forward.__wrapped__
if forward == original_forward:
break
__lowerCAmelCase = forward
if getattr(_lowerCAmelCase , """_converted_to_transformer_engine""" , _lowerCAmelCase ):
convert_model(_lowerCAmelCase , to_transformer_engine=_lowerCAmelCase )
if is_compiled:
__lowerCAmelCase = model
__lowerCAmelCase = compiled_model
return model
def lowercase ():
PartialState().wait_for_everyone()
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
if PartialState().distributed_type == DistributedType.TPU:
xm.save(_lowerCAmelCase , _lowerCAmelCase )
elif PartialState().local_process_index == 0:
torch.save(_lowerCAmelCase , _lowerCAmelCase )
@contextmanager
def lowercase (**_lowerCAmelCase ):
for key, value in kwargs.items():
__lowerCAmelCase = str(_lowerCAmelCase )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def lowercase (_lowerCAmelCase ):
if not hasattr(_lowerCAmelCase , """__qualname__""" ) and not hasattr(_lowerCAmelCase , """__name__""" ):
__lowerCAmelCase = getattr(_lowerCAmelCase , """__class__""" , _lowerCAmelCase )
if hasattr(_lowerCAmelCase , """__qualname__""" ):
return obj.__qualname__
if hasattr(_lowerCAmelCase , """__name__""" ):
return obj.__name__
return str(_lowerCAmelCase )
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
for key, value in source.items():
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = destination.setdefault(_lowerCAmelCase , {} )
merge_dicts(_lowerCAmelCase , _lowerCAmelCase )
else:
__lowerCAmelCase = value
return destination
def lowercase (_lowerCAmelCase = None ):
if port is None:
__lowerCAmelCase = 2_9500
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(("""localhost""", port) ) == 0
| 465 | 1 |
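Among the utilities above is a recursive dictionary merge: nested dicts merge key-by-key, everything else is overwritten by the source value. A readable restatement with a quick behavioral check:

```python
# Recursive dict merge matching the merge_dicts utility above.
def merge_dicts(source: dict, destination: dict) -> dict:
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})  # descend, creating as needed
            merge_dicts(value, node)
        else:
            destination[key] = value  # leaves are simply overwritten
    return destination

out = merge_dicts({"a": {"x": 1}, "b": 2}, {"a": {"y": 3}, "b": 0})
assert out == {"a": {"x": 1, "y": 3}, "b": 2}
```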
'''simple docstring'''
from __future__ import annotations
def _snake_case ( A , A ) -> List[str]:
# Checks if the entire collection has been sorted
if len(A ) <= 1 or n <= 1:
return
insert_next(A , n - 1 )
rec_insertion_sort(A , n - 1 )
def _snake_case ( A , A ) -> Union[str, Any]:
# Checks order between adjacent elements
if index >= len(A ) or collection[index - 1] <= collection[index]:
return
# Swaps adjacent elements since they are not in ascending order
lowerCAmelCase__ , lowerCAmelCase__ = (
collection[index],
collection[index - 1],
)
insert_next(A , index + 1 )
if __name__ == "__main__":
__UpperCAmelCase = input('''Enter integers separated by spaces: ''')
__UpperCAmelCase = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 701 |
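The recursive sort above pushes one element forward into place per call, then recurses on the prefix. A restatement with readable names and a quick check:

```python
# Readable restatement of the recursive insertion sort above.
def rec_insertion_sort(collection: list, n: int) -> None:
    if len(collection) <= 1 or n <= 1:
        return  # zero or one element is already sorted
    insert_next(collection, n - 1)         # settle element n-1 forward
    rec_insertion_sort(collection, n - 1)  # then handle the prefix

def insert_next(collection: list, index: int) -> None:
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return  # in order (or past the end): nothing to do
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)  # keep pushing the swap forward

nums = [5, 3, 1, 4, 2]
rec_insertion_sort(nums, len(nums))
assert nums == [1, 2, 3, 4, 5]
```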
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _snake_case ( A , A ) -> List[Any]:
lowerCAmelCase__ = []
for part_id in partition_order:
lowerCAmelCase__ = df.where(F"""SPARK_PARTITION_ID() = {part_id}""" ).collect()
for row_idx, row in enumerate(A ):
expected_row_ids_and_row_dicts.append((F"""{part_id}_{row_idx}""", row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def _snake_case ( ) -> Tuple:
lowerCAmelCase__ = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
lowerCAmelCase__ = spark.range(100 ).repartition(1 )
lowerCAmelCase__ = Spark(A )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def _snake_case ( ) -> Optional[int]:
lowerCAmelCase__ = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
lowerCAmelCase__ = spark.range(10 ).repartition(2 )
lowerCAmelCase__ = [1, 0]
lowerCAmelCase__ = _generate_iterable_examples(A , A ) # Reverse the partitions.
lowerCAmelCase__ = _get_expected_row_ids_and_row_dicts_for_partition_order(A , A )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
lowerCAmelCase__ , lowerCAmelCase__ = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _snake_case ( ) -> Optional[Any]:
lowerCAmelCase__ = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
lowerCAmelCase__ = spark.range(10 ).repartition(1 )
lowerCAmelCase__ = SparkExamplesIterable(A )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(A ):
assert row_id == F"""0_{i}"""
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def _snake_case ( ) -> Union[str, Any]:
lowerCAmelCase__ = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
lowerCAmelCase__ = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
lowerCAmelCase__ = lambda A : x.reverse()
lowerCAmelCase__ = _get_expected_row_ids_and_row_dicts_for_partition_order(A , [2, 1, 0] )
lowerCAmelCase__ = SparkExamplesIterable(A ).shuffle_data_sources(A )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(A ):
lowerCAmelCase__ , lowerCAmelCase__ = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _snake_case ( ) -> Dict:
lowerCAmelCase__ = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
lowerCAmelCase__ = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
lowerCAmelCase__ = SparkExamplesIterable(A ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
lowerCAmelCase__ = _get_expected_row_ids_and_row_dicts_for_partition_order(A , [0, 2] )
for i, (row_id, row_dict) in enumerate(A ):
lowerCAmelCase__ , lowerCAmelCase__ = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
lowerCAmelCase__ = SparkExamplesIterable(A ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
lowerCAmelCase__ = _get_expected_row_ids_and_row_dicts_for_partition_order(A , [1, 3] )
for i, (row_id, row_dict) in enumerate(A ):
lowerCAmelCase__ , lowerCAmelCase__ = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _snake_case ( ) -> Dict:
lowerCAmelCase__ = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
lowerCAmelCase__ = spark.range(100 ).repartition(1 )
lowerCAmelCase__ = Spark(A )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100 | 98 | 0 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class a__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = IFInpaintingPipeline
__lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
__lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__lowerCamelCase = PipelineTesterMixin.required_optional_params - {'latents'}
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
return self._get_dummy_components()
def UpperCamelCase ( self , lowercase , lowercase=0 ) -> List[str]:
'''simple docstring'''
if str(lowercase ).startswith("mps" ):
A__ = torch.manual_seed(lowercase )
else:
A__ = torch.Generator(device=lowercase ).manual_seed(lowercase )
A__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase ) ).to(lowercase )
A__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase ) ).to(lowercase )
A__ = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1e-1 )
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
self._test_save_load_local()
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def A ( UpperCamelCase_ : List[Any] ) -> Tuple:
'''simple docstring'''
if "img_encoder.pos_embed" in name:
lowerCAmelCase__ = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
if "img_encoder.patch_embed.proj" in name:
lowerCAmelCase__ = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
if "img_encoder.patch_embed.norm" in name:
lowerCAmelCase__ = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
if "img_encoder.layers" in name:
lowerCAmelCase__ = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
if "blocks" in name and "res" not in name:
lowerCAmelCase__ = name.replace("blocks" , "layers" )
if "attn" in name and "pre_assign" not in name:
lowerCAmelCase__ = name.replace("attn" , "self_attn" )
if "proj" in name and "self_attn" in name and "text" not in name:
lowerCAmelCase__ = name.replace("proj" , "out_proj" )
if "pre_assign_attn.attn.proj" in name:
lowerCAmelCase__ = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
if "norm1" in name:
lowerCAmelCase__ = name.replace("norm1" , "layer_norm1" )
if "norm2" in name and "pre_assign" not in name:
lowerCAmelCase__ = name.replace("norm2" , "layer_norm2" )
if "img_encoder.norm" in name:
lowerCAmelCase__ = name.replace("img_encoder.norm" , "vision_model.layernorm" )
# text encoder
if "text_encoder.token_embedding" in name:
lowerCAmelCase__ = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
if "text_encoder.positional_embedding" in name:
lowerCAmelCase__ = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "text_encoder.transformer.resblocks." in name:
lowerCAmelCase__ = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
if "ln_1" in name:
lowerCAmelCase__ = name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
lowerCAmelCase__ = name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
lowerCAmelCase__ = name.replace("c_fc" , "fc1" )
if "c_proj" in name:
lowerCAmelCase__ = name.replace("c_proj" , "fc2" )
if "text_encoder" in name:
lowerCAmelCase__ = name.replace("text_encoder" , "text_model" )
if "ln_final" in name:
lowerCAmelCase__ = name.replace("ln_final" , "final_layer_norm" )
# projection layers
if "img_projector.linear_hidden." in name:
lowerCAmelCase__ = name.replace("img_projector.linear_hidden." , "visual_projection." )
if "img_projector.linear_out." in name:
lowerCAmelCase__ = name.replace("img_projector.linear_out." , "visual_projection.3." )
if "text_projector.linear_hidden" in name:
lowerCAmelCase__ = name.replace("text_projector.linear_hidden" , "text_projection" )
if "text_projector.linear_out" in name:
lowerCAmelCase__ = name.replace("text_projector.linear_out" , "text_projection.3" )
return name
def A ( UpperCamelCase_ : str , UpperCamelCase_ : str ) -> List[Any]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowerCAmelCase__ = orig_state_dict.pop(UpperCamelCase_ )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCAmelCase__ = key.split("." )
lowerCAmelCase__ ,lowerCAmelCase__ = int(key_split[2] ), int(key_split[4] )
lowerCAmelCase__ = config.vision_config.hidden_size
if "weight" in key:
lowerCAmelCase__ = val[:dim, :]
lowerCAmelCase__ = val[dim : dim * 2, :]
lowerCAmelCase__ = val[-dim:, :]
else:
lowerCAmelCase__ = val[:dim]
lowerCAmelCase__ = val[dim : dim * 2]
lowerCAmelCase__ = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCAmelCase__ = key.split("." )
lowerCAmelCase__ = int(key_split[3] )
lowerCAmelCase__ = config.text_config.hidden_size
if "weight" in key:
lowerCAmelCase__ = val[:dim, :]
lowerCAmelCase__ = val[
dim : dim * 2, :
]
lowerCAmelCase__ = val[-dim:, :]
else:
lowerCAmelCase__ = val[:dim]
lowerCAmelCase__ = val[dim : dim * 2]
lowerCAmelCase__ = val[-dim:]
else:
lowerCAmelCase__ = rename_key(UpperCamelCase_ )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
lowerCAmelCase__ = val.squeeze_()
else:
lowerCAmelCase__ = val
return orig_state_dict
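# The "qkv" branch above slices one fused projection into query/key/value
# thirds along dim 0. A self-contained check of that split with illustrative
# shapes (dim=4 is an assumption, not the real GroupViT hidden size):
import torch

dim = 4
fused = torch.randn(3 * dim, dim)  # rows stacked as [q; k; v]
q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
assert torch.equal(torch.cat([q, k, v], dim=0), fused)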
def A ( ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCAmelCase__ = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw )
return im
@torch.no_grad()
def A ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple="groupvit-gcc-yfcc" , UpperCamelCase_ : Dict=False ) -> Any:
'''simple docstring'''
lowerCAmelCase__ = GroupViTConfig()
lowerCAmelCase__ = GroupViTModel(UpperCamelCase_ ).eval()
lowerCAmelCase__ = torch.load(UpperCamelCase_ , map_location="cpu" )["model"]
lowerCAmelCase__ = convert_state_dict(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ ,lowerCAmelCase__ = model.load_state_dict(UpperCamelCase_ , strict=UpperCamelCase_ )
assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)
# verify result
lowerCAmelCase__ = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = processor(text=["a photo of a cat", "a photo of a dog"] , images=UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors="pt" )
with torch.no_grad():
lowerCAmelCase__ = model(**UpperCamelCase_ )
if model_name == "groupvit-gcc-yfcc":
lowerCAmelCase__ = torch.tensor([[13.3_523, 6.3_629]] )
elif model_name == "groupvit-gcc-redcaps":
lowerCAmelCase__ = torch.tensor([[16.1_873, 8.6_230]] )
else:
raise ValueError(F"""Model name {model_name} not supported.""" )
assert torch.allclose(outputs.logits_per_image , UpperCamelCase_ , atol=1E-3 )
processor.save_pretrained(UpperCamelCase_ )
model.save_pretrained(UpperCamelCase_ )
print("Successfully saved processor and model to" , UpperCamelCase_ )
if push_to_hub:
print("Pushing to the hub..." )
processor.push_to_hub(UpperCamelCase_ , organization="nielsr" )
model.push_to_hub(UpperCamelCase_ , organization="nielsr" )
if __name__ == "__main__":
UpperCAmelCase__ : List[str] = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
)
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
parser.add_argument(
"--model_name",
        default="groupvit-gcc-yfcc",
type=str,
help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
)
UpperCAmelCase__ : Any = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
"""simple docstring"""
from __future__ import annotations
import pandas as pd
def lowercase ( _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
_UpperCAmelCase = [0] * no_of_processes
_UpperCAmelCase = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = burst_time[i]
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = 9_9999_9999
_UpperCAmelCase = 0
_UpperCAmelCase = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(_SCREAMING_SNAKE_CASE ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
_UpperCAmelCase = remaining_time[j]
_UpperCAmelCase = j
_UpperCAmelCase = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
_UpperCAmelCase = remaining_time[short]
if minm == 0:
_UpperCAmelCase = 9_9999_9999
if remaining_time[short] == 0:
complete += 1
_UpperCAmelCase = False
# Find finish time of current process
_UpperCAmelCase = increment_time + 1
# Calculate waiting time
_UpperCAmelCase = finish_time - arrival_time[short]
_UpperCAmelCase = finar - burst_time[short]
if waiting_time[short] < 0:
_UpperCAmelCase = 0
# Increment time
increment_time += 1
return waiting_time
def lowercase ( _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : list[int] ):
'''simple docstring'''
_UpperCAmelCase = [0] * no_of_processes
for i in range(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = burst_time[i] + waiting_time[i]
return turn_around_time
def lowercase ( _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
_UpperCAmelCase = 0
_UpperCAmelCase = 0
for i in range(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = total_waiting_time + waiting_time[i]
_UpperCAmelCase = total_turn_around_time + turn_around_time[i]
print(f'Average waiting time = {total_waiting_time / no_of_processes:.5f}' )
print('''Average turn around time =''' , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print("Enter how many process you want to analyze")
__A : Union[str, Any] = int(input())
__A : Optional[int] = [0] * no_of_processes
__A : List[str] = [0] * no_of_processes
__A : int = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print("Enter the arrival time and burst time for process:--" + str(i + 1))
__A : Optional[Any] = map(int, input().split())
__A : List[Any] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__A : Any = burst_time
__A : List[Any] = no_of_processes
__A : Union[str, Any] = waiting_time
__A : Optional[int] = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
__A : Any = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
"Process",
"BurstTime",
"ArrivalTime",
"WaitingTime",
"TurnAroundTime",
],
)
# Printing the dataFrame
pd.set_option("display.max_rows", fcfs.shape[0] + 1)
print(fcfs)
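# A hand-worked instance of the SRTF bookkeeping above (values assumed for
# illustration): two processes, both arriving at t=0, with burst times 2 and
# 4. The shorter job runs to completion first, so the longer one waits 2
# time units, and turnaround = burst + waiting.
burst = [2, 4]
waiting = [0, 2]
turnaround = [b + w for b, w in zip(burst, waiting)]
assert turnaround == [2, 6]
assert sum(waiting) / len(waiting) == 1.0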
"""simple docstring"""
class _a :
"""simple docstring"""
def __init__( self : Tuple , __UpperCamelCase : list[int] )->None:
_UpperCAmelCase = len(__UpperCamelCase )
_UpperCAmelCase = [0] * len_array
if len_array > 0:
_UpperCAmelCase = array[0]
for i in range(1 , __UpperCamelCase ):
_UpperCAmelCase = self.prefix_sum[i - 1] + array[i]
def lowercase__ ( self : str , __UpperCamelCase : int , __UpperCamelCase : int )->int:
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def lowercase__ ( self : Any , __UpperCamelCase : int )->bool:
_UpperCAmelCase = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(__UpperCamelCase )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
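# The class above answers range-sum queries in O(1) after an O(n) prefix
# build. A standalone sketch of the same idea, using itertools.accumulate
# purely for brevity:
from itertools import accumulate

array = [1, 2, 3, 4]
prefix = list(accumulate(array))  # [1, 3, 6, 10]

def range_sum(start, end):
    return prefix[end] - (prefix[start - 1] if start > 0 else 0)

assert range_sum(0, 3) == 10
assert range_sum(1, 3) == 9  # 2 + 3 + 4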
"""simple docstring"""
def lowerCamelCase_ (UpperCamelCase__ : str ):
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
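# Behavior check for the word-reversal helper above, written standalone
# because the corpus mangles the parameter name:
assert " ".join("hello world again".split()[::-1]) == "again world hello"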
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
_lowerCAmelCase :Tuple = logging.get_logger(__name__)
_lowerCAmelCase :Union[str, Any] = {
'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ ='''dpt'''
def __init__( self , A=7_6_8 , A=1_2 , A=1_2 , A=3_0_7_2 , A="gelu" , A=0.0 , A=0.0 , A=0.02 , A=1E-12 , A=3_8_4 , A=1_6 , A=3 , A=False , A=True , A=[2, 5, 8, 1_1] , A="project" , A=[4, 2, 1, 0.5] , A=[9_6, 1_9_2, 3_8_4, 7_6_8] , A=2_5_6 , A=-1 , A=False , A=True , A=0.4 , A=2_5_5 , A=0.1 , A=[1, 1_0_2_4, 2_4, 2_4] , A=[0, 1] , A=None , **A , ) -> int:
super().__init__(**A )
_UpperCAmelCase : Union[str, Any] = hidden_size
_UpperCAmelCase : Union[str, Any] = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('''Initializing the config with a `BiT` backbone.''' )
_UpperCAmelCase : List[Any] = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
}
_UpperCAmelCase : int = BitConfig(**A )
elif isinstance(A , A ):
logger.info('''Initializing the config with a `BiT` backbone.''' )
_UpperCAmelCase : Union[str, Any] = BitConfig(**A )
elif isinstance(A , A ):
_UpperCAmelCase : Tuple = backbone_config
else:
raise ValueError(
f'backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.' )
_UpperCAmelCase : int = backbone_featmap_shape
_UpperCAmelCase : Dict = neck_ignore_stages
if readout_type != "project":
raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' )
else:
_UpperCAmelCase : Tuple = None
_UpperCAmelCase : Any = None
_UpperCAmelCase : Any = []
_UpperCAmelCase : str = num_hidden_layers
_UpperCAmelCase : Any = num_attention_heads
_UpperCAmelCase : Dict = intermediate_size
_UpperCAmelCase : Optional[Any] = hidden_act
_UpperCAmelCase : List[str] = hidden_dropout_prob
_UpperCAmelCase : List[Any] = attention_probs_dropout_prob
_UpperCAmelCase : Union[str, Any] = initializer_range
_UpperCAmelCase : int = layer_norm_eps
_UpperCAmelCase : int = image_size
_UpperCAmelCase : int = patch_size
_UpperCAmelCase : str = num_channels
_UpperCAmelCase : Tuple = qkv_bias
_UpperCAmelCase : str = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' )
_UpperCAmelCase : List[Any] = readout_type
_UpperCAmelCase : int = reassemble_factors
_UpperCAmelCase : int = neck_hidden_sizes
_UpperCAmelCase : Tuple = fusion_hidden_size
_UpperCAmelCase : Any = head_in_index
_UpperCAmelCase : Optional[int] = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
_UpperCAmelCase : List[str] = use_auxiliary_head
_UpperCAmelCase : int = auxiliary_loss_weight
_UpperCAmelCase : Any = semantic_loss_ignore_index
_UpperCAmelCase : List[Any] = semantic_classifier_dropout
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Optional[Any] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
_UpperCAmelCase : Any = self.backbone_config.to_dict()
_UpperCAmelCase : List[str] = self.__class__.model_type
return output
import math
def UpperCAmelCase__( __UpperCAmelCase : Any ):
assert isinstance(__snake_case , __snake_case ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
__snake_case : Optional[Any] = range(3 , int(math.sqrt(__snake_case ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : int=1 , **__UpperCAmelCase : str ):
__snake_case : int = factor * value
__snake_case : Union[str, Any] = value
while not is_prime(__snake_case ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **__snake_case )
return value
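# A standalone equivalent of the odd-trial-division primality test above,
# used here as a sanity check (math.isqrt avoids float rounding):
import math

def is_prime(n):
    if n < 2:
        return False
    if n < 4:
        return True  # 2 and 3 are prime
    if n % 2 == 0:
        return False
    return all(n % i for i in range(3, math.isqrt(n) + 1, 2))

assert [p for p in range(20) if is_prime(p)] == [2, 3, 5, 7, 11, 13, 17, 19]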
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = None
# Automatically constructed
__UpperCAmelCase = "dict"
__UpperCAmelCase = None
__UpperCAmelCase = field(default="Translation" , init=UpperCamelCase , repr=UpperCamelCase)
def __call__( self ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def lowercase_ ( self ):
from .features import Value
return {k: Value('string' ) for k in sorted(self.languages )}
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
# Automatically constructed
__UpperCAmelCase = "dict"
__UpperCAmelCase = None
__UpperCAmelCase = field(default="TranslationVariableLanguages" , init=UpperCamelCase , repr=UpperCamelCase)
def lowercase_ ( self ):
__snake_case : List[str] = sorted(set(self.languages ) ) if self.languages else None
__snake_case : Optional[Any] = len(self.languages ) if self.languages else None
def __call__( self ):
return pa.struct({'language': pa.list_(pa.string() ), 'translation': pa.list_(pa.string() )} )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Optional[int] = set(self.languages )
if self.languages and set(_UpperCAmelCase ) - lang_set:
raise ValueError(
F"""Some languages in example ({", ".join(sorted(set(_UpperCAmelCase ) - lang_set ) )}) are not in valid set ({", ".join(_UpperCAmelCase )}).""" )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
__snake_case : Any = []
for lang, text in translation_dict.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
__snake_case , __snake_case : Any = zip(*sorted(_UpperCAmelCase ) )
return {"language": languages, "translation": translations}
def lowercase_ ( self ):
from .features import Sequence, Value
return {
"language": Sequence(Value('string' ) ),
"translation": Sequence(Value('string' ) ),
}
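# encode_example above normalizes a translation dict into two parallel,
# language-sorted lists. A plain-Python sketch of that flattening:
translation_dict = {"fr": "bonjour", "de": ["hallo", "servus"]}
pairs = []
for lang, text in translation_dict.items():
    pairs.extend((lang, t) for t in (text if isinstance(text, list) else [text]))
langs, texts = zip(*sorted(pairs))
assert langs == ("de", "de", "fr")
assert texts == ("hallo", "servus", "bonjour")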
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self , __A ):
__a = 3
__a = 250
__a = ids_tensor((batch_size, length) , __A )
__a = torch.ones((batch_size, length) , device=__A , dtype=torch.float ) / length
return input_ids, scores
def snake_case_ ( self ):
__a , __a = self._get_tensors(5 )
__a = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(__A , __A ) )
__a , __a = self._get_tensors(9 )
self.assertFalse(criteria(__A , __A ) )
__a , __a = self._get_tensors(10 )
self.assertTrue(criteria(__A , __A ) )
def snake_case_ ( self ):
__a = MaxLengthCriteria(max_length=10 )
__a , __a = self._get_tensors(5 )
self.assertFalse(criteria(__A , __A ) )
__a , __a = self._get_tensors(9 )
self.assertFalse(criteria(__A , __A ) )
__a , __a = self._get_tensors(10 )
self.assertTrue(criteria(__A , __A ) )
def snake_case_ ( self ):
__a = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
__a , __a = self._get_tensors(5 )
self.assertFalse(criteria(__A , __A ) )
__a , __a = self._get_tensors(9 )
self.assertFalse(criteria(__A , __A ) )
__a , __a = self._get_tensors(10 )
self.assertTrue(criteria(__A , __A ) )
__a = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def snake_case_ ( self ):
__a , __a = self._get_tensors(5 )
__a = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(__A , __A ) )
__a = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(__A , __A ) )
def snake_case_ ( self ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(__A ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
__a = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(__A ) , 1 )
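# The criteria under test compose with boolean OR: generation halts as soon
# as any criterion fires. A dependency-free sketch of that contract (MaxLen
# is a stand-in written for illustration, not the transformers class):
class MaxLen:
    def __init__(self, max_length):
        self.max_length = max_length

    def __call__(self, cur_length):
        return cur_length >= self.max_length

criteria = [MaxLen(10), MaxLen(7)]
assert any(c(8) for c in criteria)       # length 8 trips MaxLen(7)
assert not any(c(5) for c in criteria)   # nothing fires yet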
import requests
__lowercase = '''''' # <-- Put your OpenWeatherMap appid here!
__lowercase = '''https://api.openweathermap.org/data/2.5/'''
def lowerCamelCase ( SCREAMING_SNAKE_CASE = "Chicago" , SCREAMING_SNAKE_CASE = APPID ):
'''simple docstring'''
return requests.get(URL_BASE + '''weather''' , params=locals() ).json()
def lowerCamelCase ( SCREAMING_SNAKE_CASE = "Kolkata, India" , SCREAMING_SNAKE_CASE = APPID ):
'''simple docstring'''
return requests.get(URL_BASE + '''forecast''' , params=locals() ).json()
def lowerCamelCase ( SCREAMING_SNAKE_CASE = 55.68 , SCREAMING_SNAKE_CASE = 12.57 , SCREAMING_SNAKE_CASE = APPID ):
'''simple docstring'''
return requests.get(URL_BASE + '''onecall''' , params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
__lowercase = input('''Enter a location:''').strip()
if location:
pprint(current_weather(location))
else:
break
import os
def UpperCAmelCase__( ):
with open(os.path.dirname(__UpperCAmelCase ) + '/grid.txt' ) as f:
__snake_case : str = [] # noqa: E741
for _ in range(20 ):
l.append([int(__UpperCAmelCase ) for x in f.readline().split()] )
__snake_case : Tuple = 0
# right
for i in range(20 ):
for j in range(17 ):
__snake_case : Tuple = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
__snake_case : Any = temp
# down
for i in range(17 ):
for j in range(20 ):
__snake_case : Any = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
__snake_case : Any = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
__snake_case : int = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
__snake_case : Optional[int] = temp
# diagonal 2
for i in range(17 ):
for j in range(3 , 20 ):
__snake_case : int = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
__snake_case : int = temp
return maximum
if __name__ == "__main__":
print(solution())
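# A compact cross-check of the four directional scans above (right, down,
# and both diagonals) on a tiny grid, with a window length of 2 instead
# of the puzzle's 4:
grid = [[1, 2], [3, 4]]
k = 2
best = 0
for dr, dc in ((0, 1), (1, 0), (1, 1), (1, -1)):
    for r in range(len(grid)):
        for c in range(len(grid[0])):
            rr, cc = r + dr * (k - 1), c + dc * (k - 1)
            if 0 <= rr < len(grid) and 0 <= cc < len(grid[0]):
                prod = 1
                for step in range(k):
                    prod *= grid[r + dr * step][c + dc * step]
                best = max(best, prod)
assert best == 12  # 3 * 4 along the bottom row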
def UpperCAmelCase__( __UpperCAmelCase : str ):
if not all(x.isalpha() for x in string ):
raise ValueError('String must only contain alphabetic characters.' )
__snake_case : str = sorted(string.lower() )
return len(__UpperCAmelCase ) == len(set(__UpperCAmelCase ) )
if __name__ == "__main__":
__magic_name__ = input('''Enter a string ''').strip()
__magic_name__ = is_isogram(input_str)
print(F'''{input_str} is {"an" if isogram else "not an"} isogram.''')
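# One-expression restatement of the isogram test above:
word = "background"
assert word.isalpha() and len(set(word.lower())) == len(word)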
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: int = 'audio-spectrogram-transformer'
def __init__( self : List[Any] , a : Optional[Any]=7_6_8 , a : List[str]=1_2 , a : int=1_2 , a : List[Any]=3_0_7_2 , a : int="gelu" , a : str=0.0 , a : Union[str, Any]=0.0 , a : List[Any]=0.02 , a : Dict=1e-12 , a : Union[str, Any]=1_6 , a : Tuple=True , a : List[str]=1_0 , a : Tuple=1_0 , a : List[str]=1_0_2_4 , a : Dict=1_2_8 , **a : Optional[Any] , ):
'''simple docstring'''
super().__init__(**a )
lowercase_ : Dict = hidden_size
lowercase_ : Dict = num_hidden_layers
lowercase_ : str = num_attention_heads
lowercase_ : Any = intermediate_size
lowercase_ : List[str] = hidden_act
lowercase_ : int = hidden_dropout_prob
lowercase_ : Tuple = attention_probs_dropout_prob
lowercase_ : Union[str, Any] = initializer_range
lowercase_ : Union[str, Any] = layer_norm_eps
lowercase_ : Optional[Any] = patch_size
lowercase_ : int = qkv_bias
lowercase_ : Optional[int] = frequency_stride
lowercase_ : List[Any] = time_stride
lowercase_ : Union[str, Any] = max_length
lowercase_ : Any = num_mel_bins
'''simple docstring'''
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
from __future__ import annotations
snake_case__ = 8.988E9 # units = N * m^s * C^-2
def __magic_name__( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> dict[str, float]:
'''simple docstring'''
_lowerCamelCase = abs(chargea * chargea )
if (force, chargea, chargea, distance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if distance < 0:
raise ValueError('''Distance cannot be negative''' )
if force == 0:
_lowerCamelCase = COULOMBS_CONSTANT * charge_product / (distance**2)
return {"force": force}
elif chargea == 0:
_lowerCamelCase = abs(__UpperCAmelCase ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge1": chargea}
elif chargea == 0:
_lowerCamelCase = abs(__UpperCAmelCase ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge2": chargea}
elif distance == 0:
_lowerCamelCase = (COULOMBS_CONSTANT * charge_product / abs(__UpperCAmelCase )) ** 0.5
return {"distance": distance}
raise ValueError('''Exactly one argument must be 0''' )
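# Numeric spot check of F = k * |q1 * q2| / d**2 as used above: two 1 C
# charges held 1 m apart (illustrative values) feel ~8.99e9 N.
k = 8.988e9
force = k * abs(1 * 1) / 1**2
assert force == 8.988e9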
if __name__ == "__main__":
import doctest
    doctest.testmod()
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(__lowercase ) , 'Tatoeba directory does not exist.' )
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
_lowerCamelCase = tempfile.mkdtemp()
return TatoebaConverter(save_dir=A_ )
@slow
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
self.resolver.convert_models(['''heb-eng'''] )
@slow
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=A_ )
        assert mmeta["long_pair"] == "heb-eng"
def _A ( _lowercase ) -> int:
"""simple docstring"""
assert column_title.isupper()
__UpperCamelCase = 0
__UpperCamelCase = len(_lowercase ) - 1
__UpperCamelCase = 0
while index >= 0:
__UpperCamelCase = (ord(column_title[index] ) - 64) * pow(26 , _lowercase )
answer += value
power += 1
index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
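# A hedged companion sketch: the inverse mapping from column number back to
# title (bijective base 26 over A..Z), written here for illustration:
def column_number_to_title(n):
    title = ""
    while n > 0:
        n, rem = divmod(n - 1, 26)
        title = chr(65 + rem) + title
    return title

assert column_number_to_title(1) == "A"
assert column_number_to_title(28) == "AB"
assert column_number_to_title(701) == "ZY"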
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class __A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = 3
UpperCamelCase__ = 2_50
UpperCamelCase__ = ids_tensor((batch_size, length) , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = torch.ones((batch_size, length) , device=SCREAMING_SNAKE_CASE_ , dtype=torch.float ) / length
return input_ids, scores
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ = self._get_tensors(5 )
UpperCamelCase__ = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase__ , UpperCamelCase__ = self._get_tensors(9 )
self.assertFalse(criteria(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase__ , UpperCamelCase__ = self._get_tensors(10 )
self.assertTrue(criteria(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = MaxLengthCriteria(max_length=10 )
UpperCamelCase__ , UpperCamelCase__ = self._get_tensors(5 )
self.assertFalse(criteria(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase__ , UpperCamelCase__ = self._get_tensors(9 )
self.assertFalse(criteria(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase__ , UpperCamelCase__ = self._get_tensors(10 )
self.assertTrue(criteria(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
UpperCamelCase__ , UpperCamelCase__ = self._get_tensors(5 )
self.assertFalse(criteria(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase__ , UpperCamelCase__ = self._get_tensors(9 )
self.assertFalse(criteria(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase__ , UpperCamelCase__ = self._get_tensors(10 )
self.assertTrue(criteria(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase__ = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ = self._get_tensors(5 )
UpperCamelCase__ = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase__ = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
def UpperCAmelCase_ (self ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(SCREAMING_SNAKE_CASE_ ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
UpperCamelCase__ = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 1 )
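# MaxNewTokensCriteria above behaves like MaxLengthCriteria with an offset:
# it stops at start_length + max_new_tokens, which is why the wrapped list
# reports max_length == 10 for start_length=5, max_new_tokens=5.
start_length, max_new_tokens = 5, 5
assert start_length + max_new_tokens == 10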
"""simple docstring"""
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->str:
return "".join([hex(SCREAMING_SNAKE_CASE_ )[2:].zfill(2 ).upper() for byte in list(SCREAMING_SNAKE_CASE_ )] )
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->bytes:
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(SCREAMING_SNAKE_CASE_ ) % 2) != 0:
        raise ValueError(
            '''Base16 encoded data is invalid:\nData does not have an even number of hex digits.''' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(SCREAMING_SNAKE_CASE_ ) <= set('''0123456789ABCDEF''' ):
        raise ValueError(
            '''Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.''' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
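# Round-trip sketch for the Base16 pair above, using the stdlib base64
# module as the reference implementation:
import base64

payload = b"\x00\xffhi"
encoded = base64.b16encode(payload).decode()  # "00FF6869"
assert base64.b16decode(encoded) == payload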
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _UpperCAmelCase ( a_ , a_ , a_ ):
"""simple docstring"""
@register_to_config
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = False , ) -> Tuple:
super().__init__()
_lowerCamelCase : Tuple = nn.Embedding(_lowercase , _lowercase )
_lowerCamelCase : Dict = nn.Embedding(_lowercase , _lowercase )
_lowerCamelCase : Tuple = False
_lowerCamelCase : Any = nn.Dropout(p=_lowercase )
_lowerCamelCase : List[Any] = TaConfig(
vocab_size=_lowercase , d_model=_lowercase , num_heads=_lowercase , d_kv=_lowercase , d_ff=_lowercase , dropout_rate=_lowercase , feed_forward_proj=_lowercase , is_decoder=_lowercase , is_encoder_decoder=_lowercase , )
_lowerCamelCase : List[Any] = nn.ModuleList()
for lyr_num in range(_lowercase ):
_lowerCamelCase : Tuple = TaBlock(_lowercase )
self.encoders.append(_lowercase )
_lowerCamelCase : str = TaLayerNorm(_lowercase )
_lowerCamelCase : List[Any] = nn.Dropout(p=_lowercase )
def a__ ( self , _lowercase , _lowercase ) -> Optional[Any]:
_lowerCamelCase : List[Any] = self.token_embedder(_lowercase )
_lowerCamelCase : Union[str, Any] = encoder_input_tokens.shape[1]
_lowerCamelCase : int = torch.arange(_lowercase , device=encoder_input_tokens.device )
x += self.position_encoding(_lowercase )
_lowerCamelCase : Tuple = self.dropout_pre(_lowercase )
# inverted the attention mask
_lowerCamelCase : int = encoder_input_tokens.size()
_lowerCamelCase : Union[str, Any] = self.get_extended_attention_mask(_lowercase , _lowercase )
for lyr in self.encoders:
_lowerCamelCase : List[Any] = lyr(_lowercase , _lowercase )[0]
_lowerCamelCase : str = self.layer_norm(_lowercase )
return self.dropout_post(_lowercase ), encoder_inputs_mask
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
@slow
@require_torch
def UpperCAmelCase_ ( self ) -> int:
lowerCAmelCase__ : Any = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" ,"""prajjwal1/bert-tiny""" )
lowerCAmelCase__ : Optional[Any] = BertTokenizer.from_pretrained("""bert-base-uncased""" )
lowerCAmelCase__ : List[str] = bertabert.config.encoder.vocab_size
lowerCAmelCase__ : List[str] = tokenizer.sep_token_id
lowerCAmelCase__ : Any = tokenizer.cls_token_id
lowerCAmelCase__ : Dict = 128
lowerCAmelCase__ : Union[str, Any] = datasets.load_dataset("""cnn_dailymail""" ,"""3.0.0""" ,split="""train[:1%]""" )
lowerCAmelCase__ : Union[str, Any] = datasets.load_dataset("""cnn_dailymail""" ,"""3.0.0""" ,split="""validation[:1%]""" )
lowerCAmelCase__ : str = train_dataset.select(range(32 ) )
lowerCAmelCase__ : int = val_dataset.select(range(16 ) )
lowerCAmelCase__ : Dict = 4
def _map_to_encoder_decoder_inputs(__UpperCAmelCase ):
# Tokenizer will automatically set [BOS] <text> [EOS]
lowerCAmelCase__ : str = tokenizer(batch["""article"""] ,padding="""max_length""" ,truncation=__UpperCAmelCase ,max_length=512 )
lowerCAmelCase__ : int = tokenizer(batch["""highlights"""] ,padding="""max_length""" ,truncation=__UpperCAmelCase ,max_length=128 )
lowerCAmelCase__ : List[str] = inputs.input_ids
lowerCAmelCase__ : List[str] = inputs.attention_mask
lowerCAmelCase__ : Tuple = outputs.input_ids
lowerCAmelCase__ : int = outputs.input_ids.copy()
lowerCAmelCase__ : int = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
lowerCAmelCase__ : Any = outputs.attention_mask
assert all(len(__UpperCAmelCase ) == 512 for x in inputs.input_ids )
assert all(len(__UpperCAmelCase ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(__UpperCAmelCase ):
lowerCAmelCase__ : Tuple = pred.label_ids
lowerCAmelCase__ : Any = pred.predictions
# all unnecessary tokens are removed
lowerCAmelCase__ : Optional[int] = tokenizer.batch_decode(__UpperCAmelCase ,skip_special_tokens=__UpperCAmelCase )
lowerCAmelCase__ : List[str] = tokenizer.batch_decode(__UpperCAmelCase ,skip_special_tokens=__UpperCAmelCase )
lowerCAmelCase__ : List[str] = sum([int(pred_str[i] == label_str[i] ) for i in range(len(__UpperCAmelCase ) )] ) / len(__UpperCAmelCase )
return {"accuracy": accuracy}
# map train dataset
lowerCAmelCase__ : Optional[int] = train_dataset.map(
_map_to_encoder_decoder_inputs ,batched=__UpperCAmelCase ,batch_size=__UpperCAmelCase ,remove_columns=["""article""", """highlights"""] ,)
train_dataset.set_format(
type="""torch""" ,columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] ,)
# same for validation dataset
lowerCAmelCase__ : str = val_dataset.map(
_map_to_encoder_decoder_inputs ,batched=__UpperCAmelCase ,batch_size=__UpperCAmelCase ,remove_columns=["""article""", """highlights"""] ,)
val_dataset.set_format(
type="""torch""" ,columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] ,)
lowerCAmelCase__ : Any = self.get_auto_remove_tmp_dir()
lowerCAmelCase__ : int = SeqaSeqTrainingArguments(
output_dir=__UpperCAmelCase ,per_device_train_batch_size=__UpperCAmelCase ,per_device_eval_batch_size=__UpperCAmelCase ,predict_with_generate=__UpperCAmelCase ,evaluation_strategy="""steps""" ,do_train=__UpperCAmelCase ,do_eval=__UpperCAmelCase ,warmup_steps=0 ,eval_steps=2 ,logging_steps=2 ,)
# instantiate trainer
lowerCAmelCase__ : Any = SeqaSeqTrainer(
model=__UpperCAmelCase ,args=__UpperCAmelCase ,compute_metrics=_compute_metrics ,train_dataset=__UpperCAmelCase ,eval_dataset=__UpperCAmelCase ,tokenizer=__UpperCAmelCase ,)
# start training
trainer.train()
'''simple docstring'''
_lowerCAmelCase = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
_lowerCAmelCase = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
_lowerCAmelCase = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def __UpperCamelCase ( ) -> Tuple:
'''simple docstring'''
_a , _a = 9, 14 # noqa: F841
_a = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
_a = defaultdict(__lowerCamelCase )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
_a = mst(__lowerCamelCase )
_a = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
_a = tuple(answer[:2] )
_a = tuple(edge[::-1] )
assert edge in result or reverse in result
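# The assertion above accepts either orientation of each edge, because an
# undirected MST is a set of unordered pairs. Minimal illustration:
mst_edges = [(7, 6, 1)]
edge = (6, 7, 1)
assert edge in mst_edges or (edge[1], edge[0], edge[2]) in mst_edges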
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowercase__ = False
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def a_ ( self ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self ) -> List[str]:
_a = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
_a = "A painting of a squirrel eating a burger "
_a = torch.manual_seed(0 )
_a = pipe(
prompt=__UpperCamelCase , generator=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__UpperCamelCase )
_a = VersatileDiffusionTextToImagePipeline.from_pretrained(__UpperCamelCase )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
_a = generator.manual_seed(0 )
_a = pipe(
prompt=__UpperCamelCase , generator=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def a_ ( self ) -> List[Any]:
_a = VersatileDiffusionTextToImagePipeline.from_pretrained(
"shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
_a = "A painting of a squirrel eating a burger "
_a = torch.manual_seed(0 )
_a = pipe(
prompt=__UpperCamelCase , generator=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
_a = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_a = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a :
"""simple docstring"""
def __init__( self : Tuple , __lowercase : Any , __lowercase : Union[str, Any]=3 , __lowercase : str=32 , __lowercase : List[str]=3 , __lowercase : Union[str, Any]=10 , __lowercase : int=[10, 20, 30, 40] , __lowercase : Any=[1, 1, 2, 1] , __lowercase : Union[str, Any]=True , __lowercase : Optional[Any]=True , __lowercase : str="relu" , __lowercase : int=3 , __lowercase : Any=None , ) -> Optional[int]:
__UpperCAmelCase : Tuple = parent
__UpperCAmelCase : Optional[Any] = batch_size
__UpperCAmelCase : Union[str, Any] = image_size
__UpperCAmelCase : List[str] = num_channels
__UpperCAmelCase : str = embeddings_size
__UpperCAmelCase : Union[str, Any] = hidden_sizes
__UpperCAmelCase : str = depths
__UpperCAmelCase : Any = is_training
__UpperCAmelCase : Optional[Any] = use_labels
__UpperCAmelCase : str = hidden_act
__UpperCAmelCase : Optional[int] = num_labels
__UpperCAmelCase : Dict = scope
__UpperCAmelCase : Any = len(__lowercase )
def UpperCAmelCase ( self : List[str] ) -> List[Any]:
__UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : Any = None
if self.use_labels:
__UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_labels )
__UpperCAmelCase : List[Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self : str ) -> Optional[int]:
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def UpperCAmelCase ( self : int , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : Optional[Any] ) -> Optional[Any]:
__UpperCAmelCase : Optional[int] = TFResNetModel(config=__lowercase )
__UpperCAmelCase : Any = model(__lowercase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase ( self : Any , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : Any ) -> List[Any]:
__UpperCAmelCase : Optional[Any] = self.num_labels
__UpperCAmelCase : List[Any] = TFResNetForImageClassification(__lowercase )
__UpperCAmelCase : Dict = model(__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase ( self : List[Any] ) -> str:
__UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = config_and_inputs
__UpperCAmelCase : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class a ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
a : Tuple = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
a : Union[str, Any] = (
{'feature-extraction': TFResNetModel, 'image-classification': TFResNetForImageClassification}
if is_tf_available()
else {}
)
a : List[str] = False
a : List[Any] = False
a : Tuple = False
a : List[Any] = False
a : Union[str, Any] = False
def UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
__UpperCAmelCase : Union[str, Any] = TFResNetModelTester(self )
__UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase )
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self : List[Any] ) -> Tuple:
return
@unittest.skip(reason="""ResNet does not use inputs_embeds""" )
def UpperCAmelCase ( self : Tuple ) -> List[str]:
pass
@unittest.skip(reason="""ResNet does not support input and output embeddings""" )
def UpperCAmelCase ( self : List[str] ) -> Optional[int]:
pass
def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
__UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : str = model_class(__lowercase )
__UpperCAmelCase : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : Optional[Any] = [*signature.parameters.keys()]
__UpperCAmelCase : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowercase )
def UpperCAmelCase ( self : Tuple ) -> str:
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def UpperCAmelCase ( self : Any ) -> List[Any]:
def check_hidden_states_output(__lowercase : List[Any] , __lowercase : List[Any] , __lowercase : Tuple ):
__UpperCAmelCase : Optional[int] = model_class(__lowercase )
__UpperCAmelCase : str = model(**self._prepare_for_class(__lowercase , __lowercase ) )
__UpperCAmelCase : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__UpperCAmelCase : Dict = self.model_tester.num_stages
self.assertEqual(len(__lowercase ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : List[str] = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
__UpperCAmelCase : Any = layer_type
__UpperCAmelCase : Optional[Any] = True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Tuple = True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
def UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowercase )
@slow
def UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Optional[int] = TFResNetModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def lowerCamelCase__ ( ):
__UpperCAmelCase : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase ( self : Any ) -> int:
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
__UpperCAmelCase : Any = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
__UpperCAmelCase : int = self.default_image_processor
__UpperCAmelCase : Optional[Any] = prepare_img()
__UpperCAmelCase : List[str] = image_processor(images=__lowercase , return_tensors="""tf""" )
# forward pass
__UpperCAmelCase : Tuple = model(**__lowercase )
# verify the logits
__UpperCAmelCase : Optional[int] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowercase )
__UpperCAmelCase : Union[str, Any] = tf.constant([-11.1_069, -9.7_877, -8.3_777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __lowercase , atol=1e-4 ) )
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
UpperCAmelCase_ : List[str] = {
"google/vivit-b-16x2-kinetics400": (
"https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class a ( snake_case__ ):
'''simple docstring'''
__lowerCAmelCase : str = """vivit"""
def __init__( self , lowerCamelCase_=2_2_4 , lowerCamelCase_=3_2 , lowerCamelCase_=[2, 1_6, 1_6] , lowerCamelCase_=3 , lowerCamelCase_=7_6_8 , lowerCamelCase_=1_2 , lowerCamelCase_=1_2 , lowerCamelCase_=3_0_7_2 , lowerCamelCase_="gelu_fast" , lowerCamelCase_=0.0 , lowerCamelCase_=0.0 , lowerCamelCase_=0.02 , lowerCamelCase_=1e-06 , lowerCamelCase_=True , **lowerCamelCase_ , ) -> int:
_a : Tuple = hidden_size
_a : Dict = num_hidden_layers
_a : List[str] = num_attention_heads
_a : int = intermediate_size
_a : Optional[int] = hidden_act
_a : Dict = hidden_dropout_prob
_a : List[str] = attention_probs_dropout_prob
_a : List[str] = initializer_range
_a : List[str] = layer_norm_eps
_a : Any = image_size
_a : Optional[Any] = num_frames
_a : Dict = tubelet_size
_a : Union[str, Any] = num_channels
_a : Optional[int] = qkv_bias
super().__init__(**lowerCamelCase_ )
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
def _snake_case ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
_lowerCAmelCase : List[Any] = nn.functional.normalize(__lowerCAmelCase )
_lowerCAmelCase : List[str] = nn.functional.normalize(__lowerCAmelCase )
return torch.mm(__lowerCAmelCase , normalized_text_embeds.t() )
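# cosine_distance above is a cosine-similarity matrix: L2-normalize both
# embedding sets, then take a matrix product. Equivalence check against
# torch's built-in cosine_similarity (shapes are illustrative):
import torch
import torch.nn as nn

a, b = torch.randn(2, 8), torch.randn(3, 8)
sim = torch.mm(nn.functional.normalize(a), nn.functional.normalize(b).t())
ref = nn.functional.cosine_similarity(a[:, None, :], b[None, :, :], dim=-1)
assert torch.allclose(sim, ref, atol=1e-6)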
class UpperCAmelCase_ ( UpperCAmelCase__ ):
"""simple docstring"""
_lowercase : Optional[Any] = CLIPConfig
_lowercase : Dict = ["CLIPEncoderLayer"]
def __init__( self : str , A_ : CLIPConfig ):
'''simple docstring'''
super().__init__(lowerCamelCase__ )
_lowerCAmelCase : Optional[Any] = CLIPVisionModel(config.vision_config )
_lowerCAmelCase : List[str] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=lowerCamelCase__ )
_lowerCAmelCase : Any = nn.Parameter(torch.ones(1_7 , config.projection_dim ) , requires_grad=lowerCamelCase__ )
_lowerCAmelCase : Dict = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=lowerCamelCase__ )
_lowerCAmelCase : str = nn.Parameter(torch.ones(1_7 ) , requires_grad=lowerCamelCase__ )
_lowerCAmelCase : List[Any] = nn.Parameter(torch.ones(3 ) , requires_grad=lowerCamelCase__ )
@torch.no_grad()
def __magic_name__ ( self : Union[str, Any] , A_ : List[str] , A_ : Optional[Any] ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.vision_model(lowerCamelCase__ )[1] # pooled_output
_lowerCAmelCase : Union[str, Any] = self.visual_projection(lowerCamelCase__ )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_lowerCAmelCase : List[Any] = cosine_distance(lowerCamelCase__ , self.special_care_embeds ).cpu().float().numpy()
_lowerCAmelCase : str = cosine_distance(lowerCamelCase__ , self.concept_embeds ).cpu().float().numpy()
_lowerCAmelCase : str = []
_lowerCAmelCase : Any = image_embeds.shape[0]
for i in range(lowerCamelCase__ ):
_lowerCAmelCase : List[Any] = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign images
_lowerCAmelCase : List[Any] = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
_lowerCAmelCase : Dict = special_cos_dist[i][concept_idx]
_lowerCAmelCase : str = self.special_care_embeds_weights[concept_idx].item()
_lowerCAmelCase : List[str] = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]} )
_lowerCAmelCase : Tuple = 0.01
for concept_idx in range(len(cos_dist[0] ) ):
_lowerCAmelCase : Union[str, Any] = cos_dist[i][concept_idx]
_lowerCAmelCase : int = self.concept_embeds_weights[concept_idx].item()
_lowerCAmelCase : List[Any] = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(lowerCamelCase__ )
result.append(lowerCamelCase__ )
_lowerCAmelCase : Union[str, Any] = [len(res["bad_concepts"] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def __magic_name__ ( self : Optional[int] , A_ : torch.FloatTensor , A_ : torch.FloatTensor ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.vision_model(lowerCamelCase__ )[1] # pooled_output
_lowerCAmelCase : Optional[Any] = self.visual_projection(lowerCamelCase__ )
_lowerCAmelCase : Any = cosine_distance(lowerCamelCase__ , self.special_care_embeds )
_lowerCAmelCase : Optional[int] = cosine_distance(lowerCamelCase__ , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
_lowerCAmelCase : Union[str, Any] = 0.0
_lowerCAmelCase : List[str] = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
_lowerCAmelCase : int = torch.any(special_scores > 0 , dim=1 )
_lowerCAmelCase : Dict = special_care * 0.01
_lowerCAmelCase : Union[str, Any] = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
_lowerCAmelCase : str = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
_lowerCAmelCase : List[str] = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
def _snake_case ( SCREAMING_SNAKE_CASE ) -> "list[int]":
"""simple docstring"""
if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0" )
_lowerCAmelCase : Optional[Any] = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
_lowerCAmelCase : Optional[int] = 1
if upper_limit > 0:
_lowerCAmelCase : Any = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(SCREAMING_SNAKE_CASE ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
print('\n*** Enter -1 at any time to quit ***')
    print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
try:
while True:
            N = int(input().strip())
if N < 0:
print('\n********* Goodbye!! ************')
break
else:
print(f'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print('Try another upper limit for the sequence: ', end='')
except (NameError, ValueError):
print('\n********* Invalid input, goodbye! ************\n')
import doctest
doctest.testmod()
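# --- Added sketch: cross-check of the DP above against the closed form
# C(n) = C(2n, n) / (n + 1). `catalan_closed_form` is a name introduced here;
# math.comb requires Python 3.8+.
from math import comb
def catalan_closed_form(n: int) -> int:
    return comb(2 * n, n) // (n + 1)
assert catalan_numbers(10) == [catalan_closed_form(i) for i in range(11)]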
| 503 | 0 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
"gwf-440k": {
"url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
"sample_rate": 48000,
"sample_size": 65536,
},
"jmann-small-190k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
"sample_rate": 48000,
"sample_size": 65536,
},
"jmann-large-580k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
"sample_rate": 48000,
"sample_size": 131072,
},
"maestro-uncond-150k": {
"url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
"sample_rate": 16000,
"sample_size": 65536,
},
"unlocked-uncond-250k": {
"url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
"sample_rate": 16000,
"sample_size": 65536,
},
"honk-140k": {
"url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
"sample_rate": 16000,
"sample_size": 65536,
},
}
def alpha_sigma_to_t(alpha, sigma):
    return torch.atan2(sigma, alpha) / math.pi * 2
def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
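# --- Added sketch: numeric check of the helpers above. For t in [0, 1],
# alpha = cos(t*pi/2) and sigma = sin(t*pi/2) lie on the unit circle, so
# atan2(sigma, alpha) * 2 / pi recovers t exactly.
_t = torch.linspace(0.0, 1.0, 5)
_alpha, _sigma = torch.cos(_t * math.pi / 2), torch.sin(_t * math.pi / 2)
assert torch.allclose(alpha_sigma_to_t(_alpha, _sigma), _t, atol=1e-6)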
class Object(object):
    pass
class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()
        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"""wget {url} ./""" )
    return f"""./{model_name}.ckpt"""
DOWN_NUM_TO_LAYER = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
}
UP_NUM_TO_LAYER = {
"8": "resnets.0",
"9": "attentions.0",
"10": "resnets.1",
"11": "attentions.1",
"12": "resnets.2",
"13": "attentions.2",
}
MID_NUM_TO_LAYER = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
"8": "resnets.3",
"9": "attentions.3",
"10": "resnets.4",
"11": "attentions.4",
"12": "resnets.5",
"13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
"0": "resnets.0",
"1": "resnets.1",
"2": "resnets.2",
"4": "resnets.0",
"5": "resnets.1",
"6": "resnets.2",
}
RES_CONV_MAP = {
"skip": "conv_skip",
"main.0": "conv_1",
"main.1": "group_norm_1",
"main.3": "conv_2",
"main.4": "group_norm_2",
}
ATTN_MAP = {
"norm": "group_norm",
"qkv_proj": ["query", "key", "value"],
"out_proj": ["proj_attn"],
}
def convert_resconv_naming(name):
    if name.startswith("""skip""" ):
        return name.replace("""skip""", RES_CONV_MAP["""skip"""] )
    # name has to be of format main.{digit}
    if not name.startswith("""main.""" ):
        raise ValueError(f"""ResConvBlock error with {name}""" )
    return name.replace(name[:6], RES_CONV_MAP[name[:6]] )
def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"""Attn error with {name}""" )
def rename(input_string, max_depth=13):
    string = input_string
    if string.split(""".""" )[0] == "timestep_embed":
        return string.replace("""timestep_embed""", """time_proj""" )
    depth = 0
    if string.startswith("""net.3.""" ):
        depth += 1
        string = string[6:]
    elif string.startswith("""net.""" ):
        string = string[4:]
    while string.startswith("""main.7.""" ):
        depth += 1
        string = string[7:]
    if string.startswith("""main.""" ):
        string = string[5:]
    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]
    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"""down_blocks.{depth}"""
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"""up_blocks.{max_depth - depth - 1}"""
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"""up_blocks.{max_depth - 1}""" if int(layer_num) > 3 else "down_blocks.0"
    if not string_left.startswith(""".""" ):
        raise ValueError(f"""Naming error with {input_string} and string_left: {string_left}.""" )
    string_left = string_left[1:]
    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left
    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("""kernel""" ):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
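# --- Added sketch: what the qkv branch of transform_conv_attns does, shown on a
# dummy tensor. A fused 1D-conv kernel of shape (3*dim, dim, 1) is sliced into
# three (dim, dim) linear weights; `_dim` here is an arbitrary illustrative size.
_dim = 4
_fused = torch.randn(3 * _dim, _dim, 1)
_single = _fused.shape[0] // 3
_q, _k, _v = (_fused[i * _single : (i + 1) * _single, :, 0] for i in range(3))
assert _q.shape == _k.shape == _v.shape == (_dim, _dim)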
def main(args):
    device = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
    model_name = args.model_path.split("""/""" )[-1].split(""".""" )[0]
    if not os.path.isfile(args.model_path ):
        assert (
            model_name == args.model_path
        ), f"""Make sure to provide one of the official model names {MODELS_MAP.keys()}"""
        args.model_path = download(model_name)
    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]
    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0
    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()
    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["""state_dict"""] )
    diffusion_ema = orig_model.diffusion_ema.eval()
    orig_model_state_dict = diffusion_ema.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)
    renamed_minus_diffusers = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
    diffusers_minus_renamed = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
    assert len(renamed_minus_diffusers) == 0, f"""Problem with {renamed_minus_diffusers}"""
    assert all(k.endswith("""kernel""" ) for k in list(diffusers_minus_renamed) ), f"""Problem with {diffusers_minus_renamed}"""
    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"""Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"""
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value
    diffusers_model.load_state_dict(diffusers_state_dict)
    steps = 100
    seed = 33
    scheduler = IPNDMScheduler(num_train_timesteps=steps)
    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)
    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)
    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=scheduler)
    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios
    generated = sampling.iplms_sample(diffusion_ema, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path )
    print("""Diff sum""", diff_sum)
    print("""Diff max""", diff_max)
    assert diff_max < 1E-3, f"""Diff max: {diff_max} is too much :-/"""
    print(f"""Conversion for {model_name} successful!""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()
main(args)
| 151 |
def solution(n: int = 60_08_51_47_51_43 ):
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int." )
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one." )
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
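# --- Added sanity checks for the trial-division loop above (values are the
# known Project Euler examples: 13195 = 5 * 7 * 13 * 29, and 6857 is the
# largest prime factor of the default input).
assert solution(13195) == 29
assert solution(600851475143) == 6857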
if __name__ == "__main__":
print(F"""{solution() = }""") | 327 | 0 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager | 703 |
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1) -> qiskit.result.counts.Counts:
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError('''inputs must be integers.''' )
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError('''inputs must be positive.''' )
    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError('''inputs must be exact integers.''' )
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError('''inputs must be less or equal to 2.''' )
    # build registers
    qr = qiskit.QuantumRegister(4, '''qr''' )
    cr = qiskit.ClassicalRegister(2, '''cr''' )
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr, cr)
    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits
    backend = qiskit.Aer.get_backend('''aer_simulator''' )
    job = qiskit.execute(quantum_circuit, backend, shots=1_000)
    return job.result().get_counts(quantum_circuit)
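# --- Added reference: a classical full adder to check the dominant measurement
# outcome above. Assuming the two measured bits encode (sum, carry_out), a
# definite input (a, b, c_in) should be dominated by the matching bit string.
def classical_full_adder(a: int, b: int, c_in: int) -> tuple:
    total = a + b + c_in
    return total % 2, total // 2  # (sum, carry_out)
# e.g. classical_full_adder(1, 1, 1) == (1, 1)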
if __name__ == "__main__":
print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""") | 619 | 0 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."
if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(F"""`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}""")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 70 |
"""simple docstring"""
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """simple docstring"""
    graph: dict = {i: [] for i in range(vertices_number)}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is smaller than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i)
    return graph
def complete_graph(vertices_number: int) -> dict:
    """simple docstring"""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
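# --- Added property check: in an undirected G(n, p) graph the expected edge
# count is p * n * (n - 1) / 2; `count_edges` is a helper introduced here.
def count_edges(graph: dict) -> int:
    # every undirected edge is stored in both adjacency lists
    return sum(len(neighbors) for neighbors in graph.values()) // 2
# count_edges(random_graph(100, 0.3)) should land near 0.3 * 100 * 99 / 2 = 1485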
| 575 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"""configuration_mmbt""": ["""MMBTConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mmbt"""] = ["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
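# --- Added sketch: the idea behind _LazyModule, reduced to a module-level
# __getattr__ (PEP 562). This is illustrative only, not the actual
# transformers implementation.
import importlib
def __getattr__(name):
    # invoked only for attributes not already defined on this module
    if name in _import_structure.get("modeling_mmbt", []):
        module = importlib.import_module(".modeling_mmbt", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")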
| 563 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
def __init__( self : Dict , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : str=1_3 , lowerCamelCase__ : Any=7 , lowerCamelCase__ : Optional[Any]=True , lowerCamelCase__ : Optional[Any]=True , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Optional[Any]=True , lowerCamelCase__ : List[str]=9_9 , lowerCamelCase__ : Optional[Any]=[1, 1, 2] , lowerCamelCase__ : Optional[Any]=1 , lowerCamelCase__ : Union[str, Any]=3_2 , lowerCamelCase__ : int=4 , lowerCamelCase__ : Optional[int]=8 , lowerCamelCase__ : Union[str, Any]=3_7 , lowerCamelCase__ : List[Any]="gelu_new" , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : Union[str, Any]=0.1 , lowerCamelCase__ : Optional[Any]=0.0 , lowerCamelCase__ : List[Any]=5_1_2 , lowerCamelCase__ : Optional[int]=3 , lowerCamelCase__ : List[Any]=0.02 , lowerCamelCase__ : str=3 , lowerCamelCase__ : Union[str, Any]=4 , lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : List[Any]=False , ) -> Union[str, Any]:
"""simple docstring"""
A_ = parent
A_ = batch_size
A_ = seq_length
A_ = is_training
A_ = use_input_mask
A_ = use_token_type_ids
A_ = use_labels
A_ = vocab_size
A_ = block_sizes
A_ = num_decoder_layers
A_ = d_model
A_ = n_head
A_ = d_head
A_ = d_inner
A_ = hidden_act
A_ = hidden_dropout
A_ = attention_dropout
A_ = activation_dropout
A_ = max_position_embeddings
A_ = type_vocab_size
A_ = 2
A_ = num_labels
A_ = num_choices
A_ = scope
A_ = initializer_std
# Used in the tests to check the size of the first attention layer
A_ = n_head
# Used in the tests to check the size of the first hidden state
A_ = self.d_model
# Used in the tests to check the number of output hidden states/attentions
A_ = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
A_ = self.num_hidden_layers + 2
def UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ = None
if self.use_input_mask:
A_ = random_attention_mask([self.batch_size, self.seq_length] )
A_ = None
if self.use_token_type_ids:
A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ = None
A_ = None
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ = ids_tensor([self.batch_size] , self.num_choices )
A_ = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def UpperCamelCase ( self : Optional[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int , ) -> List[Any]:
"""simple docstring"""
A_ = TFFunnelModel(config=lowerCamelCase__ )
A_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A_ = model(lowerCamelCase__ )
A_ = [input_ids, input_mask]
A_ = model(lowerCamelCase__ )
A_ = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
A_ = False
A_ = TFFunnelModel(config=lowerCamelCase__ )
A_ = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
A_ = False
A_ = TFFunnelModel(config=lowerCamelCase__ )
A_ = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def UpperCamelCase ( self : List[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : str , lowerCamelCase__ : Tuple , lowerCamelCase__ : Any , lowerCamelCase__ : Tuple , lowerCamelCase__ : Tuple , lowerCamelCase__ : Tuple , ) -> Tuple:
"""simple docstring"""
A_ = TFFunnelBaseModel(config=lowerCamelCase__ )
A_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A_ = model(lowerCamelCase__ )
A_ = [input_ids, input_mask]
A_ = model(lowerCamelCase__ )
A_ = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
A_ = False
A_ = TFFunnelBaseModel(config=lowerCamelCase__ )
A_ = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
A_ = False
A_ = TFFunnelBaseModel(config=lowerCamelCase__ )
A_ = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def UpperCamelCase ( self : Optional[Any] , lowerCamelCase__ : int , lowerCamelCase__ : Dict , lowerCamelCase__ : int , lowerCamelCase__ : Dict , lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[int] , ) -> List[str]:
"""simple docstring"""
A_ = TFFunnelForPreTraining(config=lowerCamelCase__ )
A_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A_ = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self : List[Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : List[str] , lowerCamelCase__ : Dict , ) -> List[str]:
"""simple docstring"""
A_ = TFFunnelForMaskedLM(config=lowerCamelCase__ )
A_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A_ = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self : str , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : Dict , lowerCamelCase__ : int , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : str , lowerCamelCase__ : Tuple , ) -> Union[str, Any]:
"""simple docstring"""
A_ = self.num_labels
A_ = TFFunnelForSequenceClassification(config=lowerCamelCase__ )
A_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A_ = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self : List[str] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple , lowerCamelCase__ : int , lowerCamelCase__ : Dict , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[Any] , ) -> Optional[Any]:
"""simple docstring"""
A_ = self.num_choices
A_ = TFFunnelForMultipleChoice(config=lowerCamelCase__ )
A_ = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) )
A_ = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) )
A_ = tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.num_choices, 1) )
A_ = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
A_ = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase ( self : Optional[Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : Dict , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Dict , ) -> Union[str, Any]:
"""simple docstring"""
A_ = self.num_labels
A_ = TFFunnelForTokenClassification(config=lowerCamelCase__ )
A_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A_ = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self : Any , lowerCamelCase__ : Any , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : str , ) -> str:
"""simple docstring"""
A_ = TFFunnelForQuestionAnswering(config=lowerCamelCase__ )
A_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A_ = model(lowerCamelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self : Dict ) -> str:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
'fill-mask': TFFunnelForMaskedLM,
'question-answering': TFFunnelForQuestionAnswering,
'text-classification': TFFunnelForSequenceClassification,
'token-classification': TFFunnelForTokenClassification,
'zero-shot': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
def UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)
def UpperCamelCase ( self : int ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : List[Any] ) -> int:
"""simple docstring"""
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def UpperCamelCase ( self : Any ) -> List[str]:
"""simple docstring"""
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase__ )
def UpperCamelCase ( self : Dict ) -> Dict:
"""simple docstring"""
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase__ )
def UpperCamelCase ( self : Any ) -> Any:
"""simple docstring"""
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase__ )
def UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase__ )
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
    test_head_masking = False
    test_onnx = False
def UpperCamelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)
def UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*lowerCamelCase__ )
def UpperCamelCase ( self : str ) -> Dict:
"""simple docstring"""
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase__ )
def UpperCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCamelCase__ )
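# --- Added sketch: the input preparation used by the multiple-choice test
# above. Each (batch, seq_len) tensor is expanded and tiled to
# (batch, num_choices, seq_len); sizes here are arbitrary.
import tensorflow as tf
_batch, _seq_len, _num_choices = 2, 5, 4
_ids = tf.ones((_batch, _seq_len), dtype=tf.int32)
_tiled = tf.tile(tf.expand_dims(_ids, 1), (1, _num_choices, 1))
assert _tiled.shape == (_batch, _num_choices, _seq_len)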
| 563 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
"""simple docstring"""
def __init__(self , lowercase__ , lowercase__=13 , lowercase__=30 , lowercase__=2 , lowercase__=3 , lowercase__=True , lowercase__=True , lowercase__=32 , lowercase__=5 , lowercase__=4 , lowercase__=37 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=10 , lowercase__=0.02 , lowercase__=None , ):
snake_case_ : Dict = parent
snake_case_ : Dict = batch_size
snake_case_ : Any = image_size
snake_case_ : Tuple = patch_size
snake_case_ : Tuple = num_channels
snake_case_ : Optional[Any] = is_training
snake_case_ : str = use_labels
snake_case_ : Optional[Any] = hidden_size
snake_case_ : Optional[Any] = num_hidden_layers
snake_case_ : int = num_attention_heads
snake_case_ : List[Any] = intermediate_size
snake_case_ : int = hidden_act
snake_case_ : Any = hidden_dropout_prob
snake_case_ : Any = attention_probs_dropout_prob
snake_case_ : Union[str, Any] = type_sequence_label_size
snake_case_ : Union[str, Any] = initializer_range
snake_case_ : Dict = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
snake_case_ : Union[str, Any] = (image_size // patch_size) ** 2
snake_case_ : List[str] = num_patches + 1
def __UpperCamelCase (self ):
snake_case_ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ : Any = None
if self.use_labels:
snake_case_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase (self ):
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ):
snake_case_ : Dict = ViTMSNModel(config=__a )
model.to(__a )
model.eval()
snake_case_ : int = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ):
snake_case_ : Union[str, Any] = self.type_sequence_label_size
snake_case_ : Tuple = ViTMSNForImageClassification(__a )
model.to(__a )
model.eval()
snake_case_ : str = model(__a , labels=__a )
print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
print("""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case_ : List[Any] = 1
snake_case_ : Optional[int] = ViTMSNForImageClassification(__a )
model.to(__a )
model.eval()
snake_case_ : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ : Any = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCamelCase (self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
{"""feature-extraction""": ViTMSNModel, """image-classification""": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
def __UpperCamelCase (self ):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)
def __UpperCamelCase (self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMSN does not use inputs_embeds""" )
def __UpperCamelCase (self ):
pass
def __UpperCamelCase (self ):
snake_case_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : int = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case_ : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def __UpperCamelCase (self ):
snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : List[Any] = model_class(__a )
snake_case_ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ : Any = [*signature.parameters.keys()]
snake_case_ : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def __UpperCamelCase (self ):
snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def __UpperCamelCase (self ):
snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def __UpperCamelCase (self ):
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : Optional[Any] = ViTMSNModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def SCREAMING_SNAKE_CASE__ ( ):
"""simple docstring"""
snake_case_ : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
def __UpperCamelCase (self ):
return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None
@slow
def __UpperCamelCase (self ):
torch.manual_seed(2 )
snake_case_ : str = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(__a )
snake_case_ : Dict = self.default_image_processor
snake_case_ : int = prepare_img()
snake_case_ : List[Any] = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
snake_case_ : Union[str, Any] = model(**__a )
# verify the logits
        expected_shape = torch.Size((1, 10_00))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
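# --- Added note as code: the seq_length used throughout the tester above
# follows directly from ViT patching: (image_size // patch_size) ** 2 patches
# plus one [CLS] token, e.g. 226 for the default image_size=30, patch_size=2.
assert (30 // 2) ** 2 + 1 == 226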
| 480 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: KarrasVeScheduler
    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0
            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)
            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample
            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output, sigma_hat, sigma_prev, sample_hat, step_output.prev_sample, step_output["derivative"], )
            sample = step_output.prev_sample
        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
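# --- Added toy: the Euler-then-correct structure of the loop above on a scalar,
# with the model output replaced by the analytic derivative x / sigma (an
# assumed simplification; the scheduler's exact update lives in KarrasVeScheduler).
def _d(x, s):
    return x / s
_sigma, _sigma_prev, _x = 1.0, 0.5, 2.0
_euler = _x + (_sigma_prev - _sigma) * _d(_x, _sigma)  # first-order step
_heun = _x + (_sigma_prev - _sigma) * 0.5 * (_d(_x, _sigma) + _d(_euler, _sigma_prev))  # 2nd order correction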
| 500 | 0 |
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    '''simple docstring'''
    first_str_length: int = len(first_str)
    second_str_length: int = len(second_str)
    abs_length: int = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list: list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
print(alternative_string_arrange("AB", "XYZ"), end=" ")
| 196 |
import sys
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def solution(n: str = N):
    '''simple docstring'''
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(F'''{solution() = }''')
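# --- Added variant: a rolling O(n) version of the search above. The running
# product must reset at zeros, since a zero permanently kills a 13-digit window.
def solution_sliding(n: str = N) -> int:
    best, product = 1, 1
    window: list = []
    for digit in map(int, n):
        if digit == 0:
            window, product = [], 1
            continue
        window.append(digit)
        product *= digit
        if len(window) > 13:
            product //= window.pop(0)
        if len(window) == 13:
            best = max(best, product)
    return best
assert solution_sliding() == solution()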
| 196 | 1 |
'''simple docstring'''
import requests
APPID = ''''''  # <-- Put your OpenWeatherMap appid here!
URL_BASE = '''https://api.openweathermap.org/data/2.5/'''
def snake_case_ ( __snake_case : str = "Chicago" , __snake_case : str = APPID) -> dict:
return requests.get(URL_BASE + '''weather''' , params=locals()).json()
def snake_case_ ( __snake_case : str = "Kolkata, India" , __snake_case : str = APPID) -> dict:
return requests.get(URL_BASE + '''forecast''' , params=locals()).json()
def snake_case_ ( __snake_case : float = 5_5.6_8 , __snake_case : float = 1_2.5_7 , __snake_case : str = APPID) -> dict:
return requests.get(URL_BASE + '''onecall''' , params=locals()).json()
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input('''Enter a location:''').strip()
if location:
pprint(current_weather(location))
else:
break
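# --- Added note as code: `params=locals()` above means the argument *names*
# become the query parameters, so renaming an argument silently changes the API
# call. A rename-safe variant (illustrative):
def current_weather_explicit(q: str = "Chicago", appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "weather", params={"q": q, "appid": appid}).json()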
| 274 | '''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new('''RGB''', (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
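# --- Added equivalent: the escape-time loop of get_distance written with
# Python's complex numbers (|z| > 2 is the same test as a*a + b*b > 4).
def get_distance_complex(x: float, y: float, max_step: int) -> float:
    c = complex(x, y)
    z = c
    for step in range(max_step):  # noqa: B007
        z = z * z + c
        if abs(z) > 2:
            break
    return step / (max_step - 1)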
| 274 | 1 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
"""simple docstring"""
def __init__( self: Union[str, Any] , __A: Dict , __A: Union[str, Any]=sys.maxsize ) -> Tuple:
_A = '''bilinear'''
_A = max_size
_A = short_edge_length
def __call__( self: Dict , __A: Optional[int] ) -> Any:
_A = []
for img in imgs:
_A ,_A = img.shape[:2]
# later: provide list and randomly choose index for resize
_A = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
_A = size * 1.0 / min(__A , __A )
if h < w:
_A ,_A = size, scale * w
else:
_A ,_A = scale * h, size
if max(__A , __A ) > self.max_size:
_A = self.max_size * 1.0 / max(__A , __A )
_A = newh * scale
_A = neww * scale
_A = int(neww + 0.5 )
_A = int(newh + 0.5 )
if img.dtype == np.uinta:
_A = Image.fromarray(__A )
_A = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
_A = np.asarray(__A )
else:
_A = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
_A = nn.functional.interpolate(
__A , (newh, neww) , mode=self.interp_method , align_corners=__A ).squeeze(0 )
img_augs.append(__A )
return img_augs
class Preprocess:
"""simple docstring"""
def __init__( self: Tuple , __A: int ) -> List[str]:
_A = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
_A = cfg.INPUT.FORMAT
_A = cfg.SIZE_DIVISIBILITY
_A = cfg.PAD_VALUE
_A = cfg.INPUT.MAX_SIZE_TEST
_A = cfg.MODEL.DEVICE
_A = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_A = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_A = lambda __A : (x - self.pixel_mean) / self.pixel_std
    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im, [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]], value=self.pad_value, )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)
def __call__( self: int , __A: Dict , __A: List[str]=False ) -> List[str]:
with torch.no_grad():
if not isinstance(__A , __A ):
_A = [images]
if single_image:
assert len(__A ) == 1
for i in range(len(__A ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(__A , images.pop(__A ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
__A , torch.as_tensor(img_tensorize(images.pop(__A ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
_A = torch.tensor([im.shape[:2] for im in images] )
_A = self.aug(__A )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
_A = [self.normalizer(__A ) for x in images]
# now pad them to do the following operations
_A ,_A = self.pad(__A )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
_A = torch.true_divide(__A , __A )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    '''simple docstring'''
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes
def _clip_box(tensor, box_size: Tuple[int, int]):
    '''simple docstring'''
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
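# --- Added sketch: the scale arithmetic at the heart of ResizeShortestEdge,
# isolated as a pure function (name and example values are illustrative).
def target_hw(h: int, w: int, size: int, max_size: int) -> tuple:
    scale = size * 1.0 / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    if max(newh, neww) > max_size:
        rescale = max_size * 1.0 / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    return int(newh + 0.5), int(neww + 0.5)
assert target_hw(480, 640, 800, 1333) == (800, 1067)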
| 62 |
def depth_first_search(grid, row, col, visit):
    '''simple docstring'''
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col))
    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)
    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
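# --- Added usage example: unique 4-directional simple paths from the top-left
# to the bottom-right corner, where 1 marks a blocked cell.
maze = [
    [0, 0, 0],
    [0, 1, 0],
    [0, 0, 0],
]
assert depth_first_search(maze, 0, 0, set()) == 2  # around the central wall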
| 62 | 1 |
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--onnx_model_path""",
default=None,
type=str,
required=True,
help="""Path to ONNX model: """,
)
parser.add_argument(
"""--output_dir""",
default=None,
type=str,
required=True,
help="""The output directory where the model checkpoints and predictions will be written.""",
)
# Other parameters
parser.add_argument(
"""--tokenizer_name""",
default="""""",
type=str,
required=True,
help="""Pretrained tokenizer name or path if not the same as model_name""",
)
parser.add_argument(
"""--version_2_with_negative""",
action="""store_true""",
help="""If true, the SQuAD examples contain some that do not have an answer.""",
)
parser.add_argument(
"""--null_score_diff_threshold""",
type=float,
default=0.0,
help="""If null_score - best_non_null is greater than the threshold predict null.""",
)
parser.add_argument(
"""--max_seq_length""",
default=384,
type=int,
help=(
"""The maximum total input sequence length after WordPiece tokenization. Sequences """
"""longer than this will be truncated, and sequences shorter than this will be padded."""
),
)
parser.add_argument(
"""--doc_stride""",
default=128,
type=int,
help="""When splitting up a long document into chunks, how much stride to take between chunks.""",
)
parser.add_argument("""--per_device_eval_batch_size""", default=8, type=int, help="""Batch size per GPU/CPU for evaluation.""")
parser.add_argument(
"""--n_best_size""",
default=20,
type=int,
help="""The total number of n-best predictions to generate in the nbest_predictions.json output file.""",
)
parser.add_argument(
"""--max_answer_length""",
default=30,
type=int,
help=(
"""The maximum length of an answer that can be generated. This is needed because the start """
"""and end predictions are not conditioned on one another."""
),
)
parser.add_argument("""--seed""", type=int, default=42, help="""random seed for initialization""")
parser.add_argument(
"""--dataset_name""",
type=str,
default=None,
required=True,
help="""The name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
"""--dataset_config_name""",
type=str,
default=None,
help="""The configuration name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
"""--preprocessing_num_workers""", type=int, default=4, help="""A csv or a json file containing the training data."""
)
parser.add_argument("""--overwrite_cache""", action="""store_true""", help="""Overwrite the cached training and evaluation sets""")
parser.add_argument(
"""--fp16""",
action="""store_true""",
help="""Whether to use 16-bit (mixed) precision instead of 32-bit""",
)
parser.add_argument(
"""--int8""",
action="""store_true""",
help="""Whether to use INT8""",
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported by this script."""
"""You can do it from another script, save it, and load it from here, using --tokenizer_name."""
)
logger.info("""Training/evaluation parameters %s""", args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists("""temp_engine"""):
os.makedirs("""temp_engine""")
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, """rb""") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, """wb""") as f:
f.write(engine.serialize())
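# Note: the serialized plan written above is reloaded further below with
# trt.Runtime(TRT_LOGGER).deserialize_cuda_engine(f.read()), so the build step
# only has to run once per (precision, shape) configuration.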
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    return outputs, infer_time
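# Buffer layout sketch (matches the allocation further below, assuming two output
# bindings): h_output0/h_output1 are page-locked host arrays for the start/end logits
# and d_output0/d_output1 the matching device allocations; pinned host memory is what
# lets memcpy_dtoh_async overlap with work already queued on the stream.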
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("""Evaluation requires a dataset name""")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
column_names = raw_datasets['validation'].column_names
question_column_name = 'question' if 'question' in column_names else column_names[0]
context_column_name = 'context' if 'context' in column_names else column_names[1]
answer_column_name = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some questions have lots of whitespace on the left, which is not useful and can make
    # truncation of the context fail; strip it from every question.
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []
    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]
return tokenized_examples
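# Illustration of the overflow mechanism (hypothetical numbers, not from this script):
# with max_seq_length=384 and doc_stride=128, a 600-token context produces two
# overlapping features for the same example, and "overflow_to_sample_mapping" would be
# [0, 0]; that is how example_id ties every feature back to its source question above.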
eval_examples = raw_datasets['validation']
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="""Running tokenizer on validation dataset""",
)
data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(["""example_id""", """offset_mapping"""])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
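# Shape of what the squad/squad_v2 metric consumes (values are illustrative only):
#   predictions: [{"id": "5733be28...", "prediction_text": "Denver Broncos"}, ...]
#   references:  [{"id": "5733be28...", "answers": {"text": [...], "answer_start": [...]}}, ...]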
metric = load_metric("""squad_v2""" if args.version_2_with_negative else """squad""")
# Evaluation!
logger.info("""Loading ONNX model %s for evaluation""", args.onnx_model_path)
with open(engine_name, """rb""") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize
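    # e.g. (illustrative): an int32 input binding of shape (8, 128) needs
    # trt.volume((8, 128)) * 4 = 8 * 128 * 4 = 4096 bytes.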
# Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffers (page-locked host memory plus matching device memory)
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info("""***** Running Evaluation *****""")
logger.info(f''' Num examples = {len(eval_dataset)}''')
logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1
        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
logger.info(""" Evaluation done in total %f secs (%f sec per example)""", evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info("""Average Inference Time = {:.3f} ms""".format(total_time * 1000 / niter))
logger.info("""Total Inference Time = {:.3f} ms""".format(total_time * 1000))
logger.info("""Total Number of Inference = %d""", niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f'''Evaluation metrics: {eval_metric}''')
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
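# Migration sketch (the checkpoint name is illustrative): new code should load the
# processor class directly, e.g.
#   from transformers import PerceiverImageProcessor
#   image_processor = PerceiverImageProcessor.from_pretrained("deepmind/vision-perceiver-conv")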
"""simple docstring"""
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument('-f')
    args = parser.parse_args()
    return args.f
def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split()
        with patch.object(sys, 'argv', testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_accuracy'], 0.75)
    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '.split()
        with patch.object(sys, 'argv', testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result['eval_perplexity'], 100)
    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n '.split()
        with patch.object(sys, 'argv', testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split='test')
            self.assertGreaterEqual(result['test_rouge1'], 10)
            self.assertGreaterEqual(result['test_rouge2'], 2)
            self.assertGreaterEqual(result['test_rougeL'], 7)
            self.assertGreaterEqual(result['test_rougeLsum'], 7)
    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n '.split()
        with patch.object(sys, 'argv', testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result['eval_perplexity'], 42)
    @slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '.split()
        with patch.object(sys, 'argv', testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_accuracy'], 0.42)
    @slow
    def test_run_ner(self):
        # with so little data, distributed training needs more epochs to reach the 0/1-GPU score
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n '.split()
        with patch.object(sys, 'argv', testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_accuracy'], 0.75)
            self.assertGreaterEqual(result['eval_f1'], 0.3)
    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n '.split()
        with patch.object(sys, 'argv', testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_f1'], 30)
            self.assertGreaterEqual(result['eval_exact'], 30)
class CircularQueue:
    """Circular FIFO queue with a fixed capacity, backed by a plain list."""

    def __init__(self, n):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        """Return the front element without removing it (False when empty)."""
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception('QUEUE IS FULL')
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception('UNDERFLOW')
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
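# Minimal usage sketch (illustrative values):
if __name__ == "__main__":
    queue = CircularQueue(3)
    queue.enqueue("a").enqueue("b").enqueue("c")  # enqueue returns self, so calls chain
    print(queue.dequeue())  # -> "a"; the freed slot is reused once rear wraps around
    print(len(queue))       # -> 2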
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'''
),
'''google/electra-base-generator''': '''https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt''',
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'''
),
'''google/electra-base-generator''': (
'''https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'''
),
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/electra-small-generator''': 5_12,
'''google/electra-base-generator''': 5_12,
'''google/electra-large-generator''': 5_12,
'''google/electra-small-discriminator''': 5_12,
'''google/electra-base-discriminator''': 5_12,
'''google/electra-large-discriminator''': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/electra-small-generator''': {'''do_lower_case''': True},
'''google/electra-base-generator''': {'''do_lower_case''': True},
'''google/electra-large-generator''': {'''do_lower_case''': True},
'''google/electra-small-discriminator''': {'''do_lower_case''': True},
'''google/electra-base-discriminator''': {'''do_lower_case''': True},
'''google/electra-large-discriminator''': {'''do_lower_case''': True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case,
                         unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token,
                         mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars,
                         strip_accents=strip_accents, **kwargs)

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
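# Minimal usage sketch (downloads the checkpoint from the Hub; output shown only as an idea):
#   tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#   tokenizer("ELECTRA replaced token detection").input_ids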
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
    def test_is_control(self):
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
__SCREAMING_SNAKE_CASE = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##รฏ"),
((6, 8), "##ve"),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), "Allen"),
((2_1, 2_3), "##NL"),
((2_3, 2_4), "##P"),
((2_5, 3_3), "sentence"),
((3_3, 3_4), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), "allen"),
((2_1, 2_3), "##nl"),
((2_3, 2_4), "##p"),
((2_5, 3_3), "sentence"),
((3_3, 3_4), "."),
((0, 0), tokenizer_r.sep_token),
]
)
                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"]))
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    F"""##{token}""" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]
    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}"""):
                string_sequence = "你好，你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True)

                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)

                self.assertEqual(input_dict, prepared_input_dict)
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task='''fill-mask''', model='''sshleifer/tiny-distilroberta-base''', top_k=2, framework='''tf''')
        outputs = unmasker('''My name is <mask>''')
        self.assertEqual(
            nested_simplify(outputs, decimals=6), [
{'''sequence''': '''My name is grouped''', '''score''': 2.1e-0_5, '''token''': 3_8_0_1_5, '''token_str''': ''' grouped'''},
{'''sequence''': '''My name is accuser''', '''score''': 2.1e-0_5, '''token''': 2_5_5_0_6, '''token_str''': ''' accuser'''},
] , )
        outputs = unmasker('''The largest city in France is <mask>''')
        self.assertEqual(
            nested_simplify(outputs, decimals=6), [
{
'''sequence''': '''The largest city in France is grouped''',
'''score''': 2.1e-0_5,
'''token''': 3_8_0_1_5,
'''token_str''': ''' grouped''',
},
{
'''sequence''': '''The largest city in France is accuser''',
'''score''': 2.1e-0_5,
'''token''': 2_5_5_0_6,
'''token_str''': ''' accuser''',
},
] , )
        outputs = unmasker('''My name is <mask>''', targets=[''' Patrick''', ''' Clara''', ''' Teven'''], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6), [
{'''sequence''': '''My name is Clara''', '''score''': 2e-0_5, '''token''': 1_3_6_0_6, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Patrick''', '''score''': 2e-0_5, '''token''': 3_4_9_9, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 1.9e-0_5, '''token''': 2_9_4_1, '''token_str''': ''' Te'''},
] , )
@require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task='''fill-mask''', model='''sshleifer/tiny-distilroberta-base''', top_k=2, framework='''pt''')
        outputs = unmasker('''My name is <mask>''')
        self.assertEqual(
            nested_simplify(outputs, decimals=6), [
{'''sequence''': '''My name is Maul''', '''score''': 2.2e-0_5, '''token''': 3_5_6_7_6, '''token_str''': ''' Maul'''},
{'''sequence''': '''My name isELS''', '''score''': 2.2e-0_5, '''token''': 1_6_4_1_6, '''token_str''': '''ELS'''},
] , )
        outputs = unmasker('''The largest city in France is <mask>''')
        self.assertEqual(
            nested_simplify(outputs, decimals=6), [
{
'''sequence''': '''The largest city in France is Maul''',
'''score''': 2.2e-0_5,
'''token''': 3_5_6_7_6,
'''token_str''': ''' Maul''',
},
{'''sequence''': '''The largest city in France isELS''', '''score''': 2.2e-0_5, '''token''': 1_6_4_1_6, '''token_str''': '''ELS'''},
] , )
        outputs = unmasker('''My name is <mask>''', targets=[''' Patrick''', ''' Clara''', ''' Teven'''], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6), [
{'''sequence''': '''My name is Patrick''', '''score''': 2.1e-0_5, '''token''': 3_4_9_9, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 2e-0_5, '''token''': 2_9_4_1, '''token_str''': ''' Te'''},
{'''sequence''': '''My name is Clara''', '''score''': 2e-0_5, '''token''': 1_3_6_0_6, '''token_str''': ''' Clara'''},
] , )
        outputs = unmasker('''My name is <mask> <mask>''', top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6), [
[
{
'''score''': 2.2e-0_5,
'''token''': 3_5_6_7_6,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is Maul<mask></s>''',
},
{'''score''': 2.2e-0_5, '''token''': 1_6_4_1_6, '''token_str''': '''ELS''', '''sequence''': '''<s>My name isELS<mask></s>'''},
],
[
{
'''score''': 2.2e-0_5,
'''token''': 3_5_6_7_6,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is<mask> Maul</s>''',
},
{'''score''': 2.2e-0_5, '''token''': 1_6_4_1_6, '''token_str''': '''ELS''', '''sequence''': '''<s>My name is<mask>ELS</s>'''},
],
] , )
@require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline('''fill-mask''', model='''hf-internal-testing/tiny-random-distilbert''', device=0, framework='''pt''')
# convert model to fp16
pipe.model.half()
        outputs = pipe('''Paris is the [MASK] of France.''')
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
        self.assertIsInstance(outputs, list)
@slow
@require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task='''fill-mask''', model='''distilroberta-base''', top_k=2, framework='''pt''')
        self.run_large_test(unmasker)
@slow
@require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task='''fill-mask''', model='''distilroberta-base''', top_k=2, framework='''tf''')
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
        outputs = unmasker('''My name is <mask>''')
        self.assertEqual(
            nested_simplify(outputs), [
{'''sequence''': '''My name is John''', '''score''': 0.0_0_8, '''token''': 6_1_0, '''token_str''': ''' John'''},
{'''sequence''': '''My name is Chris''', '''score''': 0.0_0_7, '''token''': 1_5_7_3, '''token_str''': ''' Chris'''},
] , )
        outputs = unmasker('''The largest city in France is <mask>''')
        self.assertEqual(
            nested_simplify(outputs), [
{
'''sequence''': '''The largest city in France is Paris''',
'''score''': 0.2_5_1,
'''token''': 2_2_0_1,
'''token_str''': ''' Paris''',
},
{
'''sequence''': '''The largest city in France is Lyon''',
'''score''': 0.2_1_4,
'''token''': 1_2_7_9_0,
'''token_str''': ''' Lyon''',
},
] , )
        outputs = unmasker('''My name is <mask>''', targets=[''' Patrick''', ''' Clara''', ''' Teven'''], top_k=3)
        self.assertEqual(
            nested_simplify(outputs), [
{'''sequence''': '''My name is Patrick''', '''score''': 0.0_0_5, '''token''': 3_4_9_9, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Clara''', '''score''': 0.0_0_0, '''token''': 1_3_6_0_6, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Te''', '''score''': 0.0_0_0, '''token''': 2_9_4_1, '''token_str''': ''' Te'''},
] , )
@require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task='''fill-mask''', model='''sshleifer/tiny-distilroberta-base''', framework='''pt''')
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
@require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task='''fill-mask''', model='''sshleifer/tiny-distilroberta-base''', framework='''tf''')
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest('''The provided tokenizer has no mask token, (probably reformer or wav2vec2)''')

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            F'This is another {tokenizer.mask_token} test',
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(
            F'This is a {tokenizer.mask_token}',
        )
        self.assertEqual(
            outputs, [
{'''sequence''': ANY(snake_case_ ), '''score''': ANY(snake_case_ ), '''token''': ANY(snake_case_ ), '''token_str''': ANY(snake_case_ )},
{'''sequence''': ANY(snake_case_ ), '''score''': ANY(snake_case_ ), '''token''': ANY(snake_case_ ), '''token_str''': ANY(snake_case_ )},
{'''sequence''': ANY(snake_case_ ), '''score''': ANY(snake_case_ ), '''token''': ANY(snake_case_ ), '''token_str''': ANY(snake_case_ )},
{'''sequence''': ANY(snake_case_ ), '''score''': ANY(snake_case_ ), '''token''': ANY(snake_case_ ), '''token_str''': ANY(snake_case_ )},
{'''sequence''': ANY(snake_case_ ), '''score''': ANY(snake_case_ ), '''token''': ANY(snake_case_ ), '''token_str''': ANY(snake_case_ )},
] , )
        outputs = fill_masker([F'This is a {tokenizer.mask_token}'])
        self.assertEqual(
            outputs, [
{'''sequence''': ANY(snake_case_ ), '''score''': ANY(snake_case_ ), '''token''': ANY(snake_case_ ), '''token_str''': ANY(snake_case_ )},
{'''sequence''': ANY(snake_case_ ), '''score''': ANY(snake_case_ ), '''token''': ANY(snake_case_ ), '''token_str''': ANY(snake_case_ )},
{'''sequence''': ANY(snake_case_ ), '''score''': ANY(snake_case_ ), '''token''': ANY(snake_case_ ), '''token_str''': ANY(snake_case_ )},
{'''sequence''': ANY(snake_case_ ), '''score''': ANY(snake_case_ ), '''token''': ANY(snake_case_ ), '''token_str''': ANY(snake_case_ )},
{'''sequence''': ANY(snake_case_ ), '''score''': ANY(snake_case_ ), '''token''': ANY(snake_case_ ), '''token_str''': ANY(snake_case_ )},
] , )
        outputs = fill_masker([F'This is a {tokenizer.mask_token}', F'Another {tokenizer.mask_token} great test.'])
        self.assertEqual(
            outputs, [
[
{'''sequence''': ANY(snake_case_ ), '''score''': ANY(snake_case_ ), '''token''': ANY(snake_case_ ), '''token_str''': ANY(snake_case_ )},
{'''sequence''': ANY(snake_case_ ), '''score''': ANY(snake_case_ ), '''token''': ANY(snake_case_ ), '''token_str''': ANY(snake_case_ )},
{'''sequence''': ANY(snake_case_ ), '''score''': ANY(snake_case_ ), '''token''': ANY(snake_case_ ), '''token_str''': ANY(snake_case_ )},
{'''sequence''': ANY(snake_case_ ), '''score''': ANY(snake_case_ ), '''token''': ANY(snake_case_ ), '''token_str''': ANY(snake_case_ )},
{'''sequence''': ANY(snake_case_ ), '''score''': ANY(snake_case_ ), '''token''': ANY(snake_case_ ), '''token_str''': ANY(snake_case_ )},
],
[
{'''sequence''': ANY(snake_case_ ), '''score''': ANY(snake_case_ ), '''token''': ANY(snake_case_ ), '''token_str''': ANY(snake_case_ )},
{'''sequence''': ANY(snake_case_ ), '''score''': ANY(snake_case_ ), '''token''': ANY(snake_case_ ), '''token_str''': ANY(snake_case_ )},
{'''sequence''': ANY(snake_case_ ), '''score''': ANY(snake_case_ ), '''token''': ANY(snake_case_ ), '''token_str''': ANY(snake_case_ )},
{'''sequence''': ANY(snake_case_ ), '''score''': ANY(snake_case_ ), '''token''': ANY(snake_case_ ), '''token_str''': ANY(snake_case_ )},
{'''sequence''': ANY(snake_case_ ), '''score''': ANY(snake_case_ ), '''token''': ANY(snake_case_ ), '''token_str''': ANY(snake_case_ )},
],
] , )
        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker('''This is''')

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(F'This is a {tokenizer.mask_token}')
        self.assertEqual(
            outputs, [
{'''sequence''': ANY(snake_case_ ), '''score''': ANY(snake_case_ ), '''token''': ANY(snake_case_ ), '''token_str''': ANY(snake_case_ )},
{'''sequence''': ANY(snake_case_ ), '''score''': ANY(snake_case_ ), '''token''': ANY(snake_case_ ), '''token_str''': ANY(snake_case_ )},
] , )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el['''token'''] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el['''token_str'''] for el in outputs}, set(processed_targets))
        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(F'This is a {tokenizer.mask_token}', targets=targets)
        self.assertEqual(
            outputs, [
{'''sequence''': ANY(snake_case_ ), '''score''': ANY(snake_case_ ), '''token''': ANY(snake_case_ ), '''token_str''': ANY(snake_case_ )},
{'''sequence''': ANY(snake_case_ ), '''score''': ANY(snake_case_ ), '''token''': ANY(snake_case_ ), '''token_str''': ANY(snake_case_ )},
] , )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el['''token'''] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el['''token_str'''] for el in outputs}, set(processed_targets))
        # Score equivalence
        outputs = fill_masker(F'This is a {tokenizer.mask_token}', targets=targets)
        tokens = [top_mask['''token_str'''] for top_mask in outputs]
        scores = [top_mask['''score'''] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(F'This is a {tokenizer.mask_token}', targets=tokens)
            target_scores = [top_mask['''score'''] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(F'This is a {tokenizer.mask_token}', targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(F'This is a {tokenizer.mask_token}', targets=[''''''])
        with self.assertRaises(ValueError):
            outputs = fill_masker(F'This is a {tokenizer.mask_token}', targets='''''')
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(F'This is a {tokenizer.mask_token}')
        self.assertEqual(
            outputs, [
{'''sequence''': ANY(snake_case_ ), '''score''': ANY(snake_case_ ), '''token''': ANY(snake_case_ ), '''token_str''': ANY(snake_case_ )},
{'''sequence''': ANY(snake_case_ ), '''score''': ANY(snake_case_ ), '''token''': ANY(snake_case_ ), '''token_str''': ANY(snake_case_ )},
] , )
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(F'This is a {tokenizer.mask_token}', top_k=2)
        self.assertEqual(
            outputs2, [
{'''sequence''': ANY(snake_case_ ), '''score''': ANY(snake_case_ ), '''token''': ANY(snake_case_ ), '''token_str''': ANY(snake_case_ )},
{'''sequence''': ANY(snake_case_ ), '''score''': ANY(snake_case_ ), '''token''': ANY(snake_case_ ), '''token_str''': ANY(snake_case_ )},
] , )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(F'This is a {tokenizer.mask_token}', top_k=2, targets=targets)

        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        targets2 = [el['''token_str'''] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(F'This is a {tokenizer.mask_token}', top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(F'My name is {tokenizer.mask_token}', targets=targets, top_k=10)

        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)
    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        outputs = fill_masker(
            F'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}', top_k=2)
        self.assertEqual(
            outputs, [
[
{'''sequence''': ANY(snake_case_ ), '''score''': ANY(snake_case_ ), '''token''': ANY(snake_case_ ), '''token_str''': ANY(snake_case_ )},
{'''sequence''': ANY(snake_case_ ), '''score''': ANY(snake_case_ ), '''token''': ANY(snake_case_ ), '''token_str''': ANY(snake_case_ )},
],
[
{'''sequence''': ANY(snake_case_ ), '''score''': ANY(snake_case_ ), '''token''': ANY(snake_case_ ), '''token_str''': ANY(snake_case_ )},
{'''sequence''': ANY(snake_case_ ), '''score''': ANY(snake_case_ ), '''token''': ANY(snake_case_ ), '''token_str''': ANY(snake_case_ )},
],
[
{'''sequence''': ANY(snake_case_ ), '''score''': ANY(snake_case_ ), '''token''': ANY(snake_case_ ), '''token_str''': ANY(snake_case_ )},
{'''sequence''': ANY(snake_case_ ), '''score''': ANY(snake_case_ ), '''token''': ANY(snake_case_ ), '''token_str''': ANY(snake_case_ )},
],
] , )
def rank_of_matrix(matrix):
    """Return the rank of `matrix` via Gaussian elimination (mutates its input)."""
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)

    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            row -= 1
    return rank
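# Quick sanity check (illustrative matrix, not from the original file): the third
# row below is the sum of the first two, so only two rows are independent.
# rank_of_matrix([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [5.0, 7.0, 9.0]])  # -> 2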
if __name__ == "__main__":
import doctest
doctest.testmod()
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase = None
if self.use_input_mask:
lowercase = random_attention_mask([self.batch_size, self.seq_length] )
lowercase = None
if self.use_token_type_ids:
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase = None
lowercase = None
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase = ids_tensor([self.batch_size] , self.num_choices )
lowercase = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
lowercase = TFFunnelModel(config=snake_case )
lowercase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase = model(snake_case )
lowercase = [input_ids, input_mask]
lowercase = model(snake_case )
lowercase = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
lowercase = False
lowercase = TFFunnelModel(config=snake_case )
lowercase = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
lowercase = False
lowercase = TFFunnelModel(config=snake_case )
lowercase = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
lowercase = TFFunnelBaseModel(config=snake_case )
lowercase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase = model(snake_case )
lowercase = [input_ids, input_mask]
lowercase = model(snake_case )
lowercase = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
lowercase = False
lowercase = TFFunnelBaseModel(config=snake_case )
lowercase = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
lowercase = False
lowercase = TFFunnelBaseModel(config=snake_case )
lowercase = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
lowercase = TFFunnelForPreTraining(config=snake_case )
lowercase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
lowercase = TFFunnelForMaskedLM(config=snake_case )
lowercase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
lowercase = self.num_labels
lowercase = TFFunnelForSequenceClassification(config=snake_case )
lowercase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
lowercase = self.num_choices
lowercase = TFFunnelForMultipleChoice(config=snake_case )
lowercase = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) )
lowercase = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) )
lowercase = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) )
lowercase = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
lowercase = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
lowercase = self.num_labels
lowercase = TFFunnelForTokenClassification(config=snake_case )
lowercase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
lowercase = TFFunnelForQuestionAnswering(config=snake_case )
lowercase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase = model(snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Checks the full (encoder + decoder) Funnel models."""
    all_model_classes = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": (TFFunnelBaseModel, TFFunnelModel),
"""fill-mask""": TFFunnelForMaskedLM,
"""question-answering""": TFFunnelForQuestionAnswering,
"""text-classification""": TFFunnelForSequenceClassification,
"""token-classification""": TFFunnelForTokenClassification,
"""zero-shot""": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    """Checks the base (encoder-only) Funnel models."""
    all_model_classes = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
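# Illustrative sketch (not part of the test suite): the testers above exercise a
# tiny randomly initialised Funnel model; a standalone equivalent looks roughly
# like this. The concrete sizes are assumptions chosen to keep the model small.
def _tf_funnel_usage_example():
    config = FunnelConfig(
        vocab_size=99, block_sizes=[1, 1, 2], num_decoder_layers=1,
        d_model=32, n_head=4, d_head=8, d_inner=37,
    )
    model = TFFunnelModel(config)
    input_ids = tf.random.uniform((2, 7), maxval=config.vocab_size, dtype=tf.int32)
    outputs = model(input_ids)
    # the decoder upsamples back to the input length, so the shape is (batch, seq_len, d_model)
    assert outputs.last_hidden_state.shape == (2, 7, 32)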
| 84 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
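# Illustrative sketch of the fallback pattern above (the class name is a generic
# assumption, not a diffusers internal): when the optional backends are missing,
# each public pipeline is replaced by a stub that raises only on instantiation,
# so that importing the package itself stays cheap and safe.
class _DummyPipelineSketch:
    def __init__(self, *args, **kwargs):
        raise OptionalDependencyNotAvailable(
            "This pipeline requires torch and transformers>=4.25.0 to be installed."
        )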
| 387 | 0 |
"""simple docstring"""
import math
def A_ ( __lowercase , __lowercase ):
UpperCamelCase_ : List[str] =len(__lowercase )
UpperCamelCase_ : Optional[int] =int(math.floor(math.sqrt(__lowercase ) ) )
UpperCamelCase_ : Any =0
while arr[min(__lowercase , __lowercase ) - 1] < x:
UpperCamelCase_ : str =step
step += int(math.floor(math.sqrt(__lowercase ) ) )
if prev >= n:
return -1
while arr[prev] < x:
UpperCamelCase_ : List[str] =prev + 1
if prev == min(__lowercase , __lowercase ):
return -1
if arr[prev] == x:
return prev
return -1
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = input('Enter numbers separated by a comma:\n').strip()
__SCREAMING_SNAKE_CASE = [int(item) for item in user_input.split(',')]
__SCREAMING_SNAKE_CASE = int(input('Enter the number to be searched:\n'))
__SCREAMING_SNAKE_CASE = jump_search(arr, x)
if res == -1:
print('Number not found!')
else:
print(F"""Number {x} is at index {res}""")
| 395 |
"""simple docstring"""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict(flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"Loading PyTorch weights from {pt_path}")

        pt_state_dict = torch.load(pt_path, map_location="cpu")
        logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.")

        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix):
    """Rename a PyTorch key to its Flax equivalent and reshape the tensor if necessary."""

    def is_key_or_prefix_key_in_dict(key) -> bool:
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
    # convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    model_prefix = flax_model.base_model_prefix

    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params["params"]
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params)

    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params["batch_stats"])
        random_flax_state_dict.update(flax_batch_stats)

    flax_state_dict = {}

    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
    )

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split("."))
        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
        )
        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key, None)
                continue
            # also add unexpected weight so that warning is thrown
            flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    import torch

    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

        model_prefix = flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["params"]
            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"]))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)

        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
        )

        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split("."))
            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]

            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
            )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                        f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                    )

            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key, None)
                    continue
                # also add unexpected weight so that warning is thrown
                flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")

    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
# check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
'before loading those in PyTorch model.' )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )
    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()

    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split(".")[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split(".")[0] for k in pt_model_dict.keys()}
    )

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict

        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_var",)

        if "batch_stats" in flax_state:
            flax_key = ".".join(flax_key_tuple[1:])  # Remove the params/batch_stats header
        else:
            flax_key = ".".join(flax_key_tuple)
        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split(".")
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + "_g"
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + "_v"
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = ".".join(key_components)
                special_pt_names[key_to_check] = key

        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)
    # re-transform missing_keys to list
    missing_keys = list(missing_keys)
    if len(unexpected_keys) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
F''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
F''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
F''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
else:
logger.warning(F'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' )
    if len(missing_keys) > 0:
logger.warning(
F'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
F''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
' use it for predictions and inference.' )
else:
logger.warning(
F'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'''
'If your task is similar to the task the model of the checkpoint was trained on, '
F'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' )
return pt_model
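# Illustrative numpy sketch of the two core layout rules implemented above
# (shapes are arbitrary assumptions for the demo): a PyTorch Linear weight is
# (out, in) while a Flax Dense kernel is (in, out), and a PyTorch Conv2d weight
# (out, in, kh, kw) maps to a Flax Conv kernel (kh, kw, in, out).
def _weight_layout_example():
    linear_pt = np.zeros((8, 4))  # torch.nn.Linear(4, 8).weight layout
    assert linear_pt.T.shape == (4, 8)  # Flax Dense kernel layout
    conv_pt = np.zeros((16, 3, 5, 5))  # torch.nn.Conv2d(3, 16, 5).weight layout
    assert conv_pt.transpose(2, 3, 1, 0).shape == (5, 5, 3, 16)  # Flax Conv kernel layout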
| 395 | 1 |
"""simple docstring"""
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
if "attn.in_proj" in key:
lowercase_ : Optional[int] = key.split('''.''' )
if key.startswith('''visual''' ):
lowercase_ : List[Any] = key_split[3]
lowercase_ : int = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
lowercase_ : Tuple = val[
:dim, :
]
lowercase_ : int = val[
dim : dim * 2, :
]
lowercase_ : Tuple = val[
-dim:, :
]
else:
lowercase_ : Tuple = val[
:dim
]
lowercase_ : List[Any] = val[
dim : dim * 2
]
lowercase_ : Dict = val[
-dim:
]
else:
if "weight" in key:
lowercase_ : int = val[
:dim, :
]
lowercase_ : Any = val[
dim : dim * 2, :
]
lowercase_ : List[Any] = val[
-dim:, :
]
else:
lowercase_ : Dict = val[:dim]
lowercase_ : Union[str, Any] = val[
dim : dim * 2
]
lowercase_ : str = val[-dim:]
elif key.startswith('''mit''' ):
lowercase_ : List[str] = key_split[2]
lowercase_ : str = config.vision_config.mit_hidden_size
if "weight" in key:
lowercase_ : List[Any] = val[:dim, :]
lowercase_ : List[str] = val[dim : dim * 2, :]
lowercase_ : Tuple = val[-dim:, :]
else:
lowercase_ : Any = val[:dim]
lowercase_ : Optional[int] = val[dim : dim * 2]
lowercase_ : Union[str, Any] = val[-dim:]
else:
lowercase_ : List[Any] = key_split[2]
lowercase_ : str = config.text_config.hidden_size
if "weight" in key:
lowercase_ : str = val[:dim, :]
lowercase_ : Optional[int] = val[
dim : dim * 2, :
]
lowercase_ : Dict = val[-dim:, :]
else:
lowercase_ : List[Any] = val[:dim]
lowercase_ : Dict = val[
dim : dim * 2
]
lowercase_ : int = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val
    return orig_state_dict
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset",
    )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()
if "drive" in checkpoint_url:
lowercase_ : Optional[int] = '''pytorch_model.bin'''
gdown.cached_download(lowercase__ , lowercase__ , quiet=lowercase__ )
lowercase_ : List[Any] = torch.load(lowercase__ , map_location='''cpu''' )['''model''']
else:
lowercase_ : Optional[Any] = torch.hub.load_state_dict_from_url(lowercase__ )['''model''']
lowercase_ : Optional[int] = convert_state_dict(lowercase__ , lowercase__ )
lowercase_ : List[str] = XCLIPModel(lowercase__ )
lowercase_ : str = model.load_state_dict(lowercase__ , strict=lowercase__ )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )
print('''Shape of pixel values:''' , inputs.pixel_values.shape )
    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
# kinetics-400
if model_name == "xclip-base-patch32":
lowercase_ : Dict = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
lowercase_ : Optional[Any] = torch.tensor([[7.09_99e-04, 9.98_83e-01, 4.55_80e-04]] )
elif model_name == "xclip-base-patch16":
lowercase_ : Dict = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
lowercase_ : List[str] = torch.tensor([[7.69_37e-04, 9.97_28e-01, 1.94_73e-03]] )
elif model_name == "xclip-large-patch14":
lowercase_ : Dict = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
lowercase_ : Any = torch.tensor([[3.38_77e-04, 9.99_37e-01, 2.88_88e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
lowercase_ : List[Any] = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
lowercase_ : Tuple = torch.tensor([[3.85_54e-04, 9.99_29e-01, 3.27_54e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
lowercase_ : Dict = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
lowercase_ : Optional[int] = torch.tensor([[7.18_90e-06, 9.99_94e-01, 5.65_59e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
lowercase_ : str = torch.tensor([[1.03_20e-05, 9.99_93e-01, 6.24_35e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
lowercase_ : Optional[Any] = torch.tensor([[4.13_77e-06, 9.99_90e-01, 9.83_86e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
lowercase_ : str = torch.tensor([[4.13_47e-05, 9.99_62e-01, 3.34_11e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
lowercase_ : List[str] = torch.tensor([[8.58_57e-05, 9.99_28e-01, 6.32_91e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
lowercase_ : Tuple = torch.tensor([[8.58_57e-05, 9.99_28e-01, 6.32_91e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
lowercase_ : List[str] = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
lowercase_ : Optional[Any] = torch.tensor([[9.82_19e-04, 9.95_93e-01, 3.08_63e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
lowercase_ : Union[str, Any] = torch.tensor([[3.50_82e-04, 9.97_85e-01, 1.79_66e-03]] )
else:
raise ValueError(F'''Model name {model_name} not supported''' )
assert torch.allclose(lowercase__ , lowercase__ , atol=1e-3 )
print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
__A : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''xclip-base-patch32''',
type=str,
help='''Name of the model.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
        '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__A : Optional[Any] = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
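# Illustrative numpy sketch of the in_proj splitting performed in convert_state_dict
# above: CLIP-style checkpoints store query/key/value as one stacked matrix, which
# is cut into equal thirds. Sizes here are assumptions for the demo.
def _qkv_split_example():
    dim = 4
    in_proj_weight = np.arange(3 * dim * dim).reshape(3 * dim, dim)
    q = in_proj_weight[:dim, :]
    k = in_proj_weight[dim : dim * 2, :]
    v = in_proj_weight[-dim:, :]
    assert q.shape == k.shape == v.shape == (dim, dim)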
| 231 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, 'src', 'transformers')

DUMMY_CONSTANT = '\n{0} = None\n'
DUMMY_CLASS = '\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n'
DUMMY_FUNCTION = '\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n'
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend('    if not is_tokenizers_available():')
        self.assertEqual(simple_backend, 'tokenizers')

        backend_with_underscore = find_backend('    if not is_tensorflow_text_available():')
        self.assertEqual(backend_with_underscore, 'tensorflow_text')

        double_backend = find_backend('    if not (is_sentencepiece_available() and is_tokenizers_available()):')
        self.assertEqual(double_backend, 'sentencepiece_and_tokenizers')

        double_backend_with_underscore = find_backend(
            '    if not (is_sentencepiece_available() and is_tensorflow_text_available()):'
        )
        self.assertEqual(double_backend_with_underscore, 'sentencepiece_and_tensorflow_text')

        triple_backend = find_backend(
            '    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):'
        )
        self.assertEqual(triple_backend, 'sentencepiece_and_tokenizers_and_vision')

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('torch', objects)
        self.assertIn('tensorflow_text', objects)
        self.assertIn('sentencepiece_and_tokenizers', objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn('BertModel', objects['torch'])
        self.assertIn('TFBertModel', objects['tf'])
        self.assertIn('FlaxBertModel', objects['flax'])
        self.assertIn('BertModel', objects['torch'])
        self.assertIn('TFBertTokenizer', objects['tensorflow_text'])
        self.assertIn('convert_slow_tokenizer', objects['sentencepiece_and_tokenizers'])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object('CONSTANT', "'torch'")
        self.assertEqual(dummy_constant, '\nCONSTANT = None\n')

        dummy_function = create_dummy_object('function', "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
"""
        dummy_class = create_dummy_object('FakeClass', "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
"""
        dummy_files = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']})
        self.assertEqual(dummy_files['torch'], expected_dummy_pytorch_file)
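# Minimal sketch (an assumption, not the real utility) of what a find_backend-style
# helper does: pull the backend name out of an `if not is_xxx_available():` guard
# line so per-backend dummy objects can be generated. The real helper also joins
# compound guards into names like "sentencepiece_and_tokenizers".
import re


def _find_backend_sketch(line):
    match = re.search(r"is_(\w+)_available\(\)", line)
    return match.group(1) if match else None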
| 696 | 0 |
'''Find the maximal product a * b * c over Pythagorean triplets with a + b + c == n.'''


def solution(n: int = 1000) -> int:
    """Return the largest product ``a * b * c`` over Pythagorean triplets summing to ``n``."""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2 + b**2 = c**2 and a + b + c = n, eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product


if __name__ == "__main__":
    print(f"{solution() = }")
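# Verification sketch: for n = 1000 the unique triplet is (200, 375, 425), giving
# the product 31875000; the b formula follows from substituting c = n - a - b
# into a**2 + b**2 = c**2 and solving for b.
def _solution_check():
    assert 200 + 375 + 425 == 1000
    assert 200**2 + 375**2 == 425**2
    assert solution(1000) == 200 * 375 * 425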
| 714 |
'''Image processor that resizes inputs down to the nearest multiple of a size divisor.'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCAmelCase_ : Any = logging.get_logger(__name__)
class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self, image: np.ndarray, size_divisor: int, resample, data_format: Optional[ChannelDimension] = None, **kwargs
    ) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        # the bare name resolves to the functional `resize` imported at module level
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image(s)")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]
        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
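# Quick illustration of the size_divisor rounding used by resize above: spatial
# dims are floored to the nearest multiple so every feature map divides evenly.
# The sample sizes are arbitrary assumptions for the demo.
def _size_divisor_example():
    height, width, size_divisor = 481, 640, 32
    new_h = height // size_divisor * size_divisor
    new_w = width // size_divisor * size_divisor
    assert (new_h, new_w) == (480, 640)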
| 461 | 0 |
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
_SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    """Extracts Kaldi-style filter-bank features and applies utterance-level CMVN."""

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: Optional[bool] = True,
        normalize_vars: Optional[bool] = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        # make sure we normalize float32 arrays
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})
        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
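# Standalone numpy illustration of the utterance-level CMVN applied above: after
# normalization each feature dimension has (approximately) zero mean and unit
# variance. The random shape is an arbitrary assumption for the demo.
def _cmvn_example():
    x = np.random.RandomState(0).randn(100, 80).astype(np.float32)
    normalized = (x - x.mean(axis=0)) / x.std(axis=0)
    assert np.allclose(normalized.mean(axis=0), 0.0, atol=1e-5)
    assert np.allclose(normalized.std(axis=0), 1.0, atol=1e-5)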
| 344 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
| 344 | 1 |
'''simple docstring'''
def check_cycle(graph: dict) -> bool:
    """Return True if the directed graph contains a cycle."""
    visited: set[int] = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set[int] = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
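
if __name__ == "__main__":
    # Usage sketch (not from the original module, assuming the adjacency-dict
    # input format used above): 0 -> 1 -> 2 -> 0 forms a cycle.
    print(check_cycle({0: [1], 1: [2], 2: [0]}))  # True
    print(check_cycle({0: [1], 1: [2], 2: []}))  # False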
| 704 |
'''simple docstring'''
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.get_config()
        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()
        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))


@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
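
# A minimal pairing sketch (not part of the test suite above): the encoder
# yields hidden states and the decoder, flipped to `is_decoder=True` with
# cross-attention, consumes them. Config values mirror the tiny tester defaults.
#
#     config = BertGenerationConfig(vocab_size=99, hidden_size=32, num_hidden_layers=5,
#                                   num_attention_heads=4, intermediate_size=37)
#     encoder = BertGenerationEncoder(config)
#     config.is_decoder = True
#     config.add_cross_attention = True
#     decoder = BertGenerationDecoder(config)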
| 312 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}


class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128112, max_position_embeddings=1024,
        encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16,
        decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.05, decoder_layerdrop=0.05,
        use_cache=True, is_encoder_decoder=True, activation_function="relu",
        d_model=1024, dropout=0.1, attention_dropout=0.1, activation_dropout=0.0,
        init_std=0.02, decoder_start_token_id=2, scale_embedding=True,
        router_bias=False, router_dtype="float32", router_ignore_padding_tokens=False,
        num_experts=128, expert_capacity=64,
        encoder_sparse_step=4, decoder_sparse_step=4,
        router_z_loss_coef=0.001, router_aux_loss_coef=0.001,
        second_expert_policy="all", normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False, moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1, bos_token_id=0, eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
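
if __name__ == "__main__":
    # A quick sanity-check sketch (not part of the original module): the config
    # is a plain container, so a scaled-down variant can be built by overriding
    # the defaults above.
    tiny_config = NllbMoeConfig(d_model=64, encoder_layers=2, decoder_layers=2, num_experts=4)
    print(tiny_config.num_experts, tiny_config.router_dtype)  # 4 float32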
| 107 |
'''simple docstring'''
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(
        self, source_vertex: T, destination_vertex: T
    ) -> GraphAdjacencyList[T]:
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
return self
def __repr__( self : int ):
return pformat(self.adj_list )
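
if __name__ == "__main__":
    # Usage sketch (not from the original module): one directed and one
    # undirected graph built through the chained `add_edge` API above.
    d_graph = GraphAdjacencyList()
    d_graph.add_edge(0, 1).add_edge(1, 2)
    print(d_graph)  # {0: [1], 1: [2], 2: []}
    u_graph = GraphAdjacencyList(directed=False)
    u_graph.add_edge(0, 1)
    print(u_graph)  # {0: [1], 1: [0]}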
| 358 | 0 |
'''simple docstring'''
def permute(nums: list[int]) -> list[list[int]]:
    """Return all permutations of nums (rotate-and-recurse approach)."""
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result
def permute2(nums: list[int]) -> list[list[int]]:
    """Return all permutations of nums via in-place swap backtracking."""

    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[start], nums[i] = nums[i], nums[start]
                backtrack(start + 1)
                nums[start], nums[i] = nums[i], nums[start]  # backtrack

    output: list[list[int]] = []
    backtrack(0)
    return output
if __name__ == "__main__":
import doctest
# use res to print the data in permute2 function
    res = permute2([1, 2, 3])
print(res)
doctest.testmod()
| 50 |
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")
        outputs = model(**inputs)
        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 50 | 1 |
"""simple docstring"""
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
'''simple docstring'''
if inductance <= 0:
raise ValueError('Inductance cannot be 0 or negative' )
elif capacitance <= 0:
raise ValueError('Capacitance cannot be 0 or negative' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
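
if __name__ == "__main__":
    # Worked example (not from the original module): a 10 mH inductor with a
    # 100 uF capacitor resonates at f = 1 / (2 * pi * sqrt(L * C)) ~ 159.15 Hz.
    print(resonant_frequency(inductance=0.01, capacitance=0.0001))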
| 265 |
"""simple docstring"""
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Recursively check both ends of the current window and move inward."""
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
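
if __name__ == "__main__":
    # Usage sketch (not from the original module): the search checks both ends
    # of the window and recurses inward, so it runs in O(n) on an unsorted list.
    print(search([5, 2, 9, 1], 9))  # 2
    print(search([5, 2, 9, 1], 7))  # -1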
| 265 | 1 |
'''simple docstring'''
def naive_cut_rod_recursive(n: int, prices: list):
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        raise ValueError(f"n must be greater than or equal to 0. Got n = {n}")
    if n > len(prices):
        raise ValueError(
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main() | 705 |
'''simple docstring'''
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")


def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2


class MinPriorityQueue(Generic[T]):
    """Min-heap priority queue keyed by integer weights."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos


class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph stored as nested adjacency dictionaries."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight


def prims_algorithm(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    """Compute an MST, returning (distance-to-tree, parent) maps per node."""
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}
    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
return dist, parent | 195 | 0 |
'''simple docstring'''
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(
    arr: Sequence[float], low: int, high: int
) -> tuple[int | None, int | None, float]:
    """Divide and conquer: find the contiguous subarray with the maximum sum."""
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(
    arr: Sequence[float], low: int, mid: int, high: int
) -> tuple[int, int, float]:
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1
    summ: float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()
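
# Worked example (not part of the module): for arr = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
# the maximum subarray is arr[3:7] = [4, -1, 2, 1] with sum 6, so
# max_subarray(arr, 0, len(arr) - 1) returns (3, 6, 6).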
if __name__ == "__main__":
from doctest import testmod
testmod()
| 56 |
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")
    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")
    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
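
# Worked example (not part of the original module): with travel days
# [1, 4, 6, 7, 8, 20] and pass costs [2, 7, 15] for 1-, 7- and 30-day passes,
# the cheapest plan is a 1-day pass for day 1, a 7-day pass covering days 4-8,
# and a 1-day pass for day 20: mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11.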
if __name__ == "__main__":
import doctest
doctest.testmod() | 360 | 0 |
"""simple docstring"""
def sum_of_digits(n: int) -> int:
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmark the three implementations against each other."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262_144, 1_125_899_906_842_624, 1_267_650_600_228_229_401_496_703_205_376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 545 |
"""simple docstring"""
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    """Read the given file and return its contents as a string of bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    """Replace curr_string with its two one-bit extensions in the lexicon."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id
    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]
    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """Compress the bit string with a Lempel-Ziv style dictionary coder."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """Prepend the source file's length (zero-padded binary) to the data."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """Pack the bit string into bytes (with a stop marker) and write them out."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
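
# Usage sketch (hypothetical paths and module name, not part of the module):
#
#     python lempel_ziv.py source.txt destination.bin
#
# reads `source.txt` as a bit string, compresses it with the dictionary coder
# above, prepends the original file length, and writes the packed bytes out.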
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 545 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_pytorch(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )
            all_root_files = [
                t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))
            ]
            files = [item for sublist in all_root_files for item in sublist]
            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class FlaxPipelineTests(unittest.TestCase):
    def test_dummy_all_tpus(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1
        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : Dict = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="flax" , safety_checker=lowercase )
UpperCAmelCase : Tuple = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase : Tuple = jax.random.PRNGKey(0 )
UpperCAmelCase : Dict = 50
UpperCAmelCase : List[Any] = jax.device_count()
UpperCAmelCase : int = num_samples * [prompt]
UpperCAmelCase : Dict = pipeline.prepare_inputs(lowercase )
# shard inputs and rng
UpperCAmelCase : List[Any] = replicate(lowercase )
UpperCAmelCase : Optional[int] = jax.random.split(lowercase , lowercase )
UpperCAmelCase : Optional[int] = shard(lowercase )
UpperCAmelCase : Dict = pipeline(lowercase , lowercase , lowercase , lowercase , jit=lowercase ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_5_6_5_2_4_0_1) ) < 1E-3
assert np.abs((np.abs(lowercase , dtype=np.floataa ).sum() - 2_3_8_3_8_0_8.2) ) < 5E-1
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowercase )
UpperCAmelCase : List[str] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase : List[str] = jax.random.PRNGKey(0 )
UpperCAmelCase : str = 50
UpperCAmelCase : Union[str, Any] = jax.device_count()
UpperCAmelCase : int = num_samples * [prompt]
UpperCAmelCase : Dict = pipeline.prepare_inputs(lowercase )
# shard inputs and rng
UpperCAmelCase : List[str] = replicate(lowercase )
UpperCAmelCase : str = jax.random.split(lowercase , lowercase )
UpperCAmelCase : Tuple = shard(lowercase )
UpperCAmelCase : List[str] = pipeline(lowercase , lowercase , lowercase , lowercase , jit=lowercase ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1E-3
assert np.abs((np.abs(lowercase , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5E-1
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa )
UpperCAmelCase : Optional[int] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase : Optional[int] = jax.random.PRNGKey(0 )
UpperCAmelCase : str = 50
UpperCAmelCase : int = jax.device_count()
UpperCAmelCase : Optional[int] = num_samples * [prompt]
UpperCAmelCase : str = pipeline.prepare_inputs(lowercase )
# shard inputs and rng
UpperCAmelCase : Any = replicate(lowercase )
UpperCAmelCase : Union[str, Any] = jax.random.split(lowercase , lowercase )
UpperCAmelCase : str = shard(lowercase )
UpperCAmelCase : Any = pipeline(lowercase , lowercase , lowercase , lowercase , jit=lowercase ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1E-3
assert np.abs((np.abs(lowercase , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5E-1
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase : str = FlaxDDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , set_alpha_to_one=lowercase , steps_offset=1 , )
UpperCAmelCase , UpperCAmelCase : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , scheduler=lowercase , safety_checker=lowercase , )
UpperCAmelCase : Any = scheduler.create_state()
UpperCAmelCase : Union[str, Any] = scheduler_state
UpperCAmelCase : List[Any] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase : List[str] = jax.random.PRNGKey(0 )
UpperCAmelCase : int = 50
UpperCAmelCase : int = jax.device_count()
UpperCAmelCase : str = num_samples * [prompt]
UpperCAmelCase : int = pipeline.prepare_inputs(lowercase )
# shard inputs and rng
UpperCAmelCase : Dict = replicate(lowercase )
UpperCAmelCase : Union[str, Any] = jax.random.split(lowercase , lowercase )
UpperCAmelCase : Dict = shard(lowercase )
UpperCAmelCase : Any = pipeline(lowercase , lowercase , lowercase , lowercase , jit=lowercase ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_5_0_4_3_9_4_5) ) < 1E-3
assert np.abs((np.abs(lowercase , dtype=np.floataa ).sum() - 2_3_4_7_6_9_3.5) ) < 5E-1
def __lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase : Tuple = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase : Optional[int] = jax.device_count()
UpperCAmelCase : Optional[Any] = num_samples * [prompt]
UpperCAmelCase : str = jax.random.split(jax.random.PRNGKey(0 ) , lowercase )
UpperCAmelCase , UpperCAmelCase : int = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowercase , )
UpperCAmelCase : Optional[int] = replicate(lowercase )
UpperCAmelCase : Any = pipeline.prepare_inputs(lowercase )
UpperCAmelCase : List[str] = shard(lowercase )
UpperCAmelCase : List[str] = pipeline(lowercase , lowercase , lowercase , jit=lowercase ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
UpperCAmelCase : int = images[2, 0, 2_56, 10:17, 1]
# With memory efficient attention
UpperCAmelCase , UpperCAmelCase : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowercase , use_memory_efficient_attention=lowercase , )
UpperCAmelCase : Union[str, Any] = replicate(lowercase )
UpperCAmelCase : List[Any] = pipeline.prepare_inputs(lowercase )
UpperCAmelCase : Union[str, Any] = shard(lowercase )
UpperCAmelCase : Dict = pipeline(lowercase , lowercase , lowercase , jit=lowercase ).images
assert images_eff.shape == (num_samples, 1, 5_12, 5_12, 3)
UpperCAmelCase : List[Any] = images[2, 0, 2_56, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
| 595 |
"""simple docstring"""
def perfect_cube(n: int) -> bool:
    """Check whether n is a perfect cube via a floating-point cube-root test."""
    val = n ** (1 / 3)
    return (val * val * val) == n


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
| 595 | 1 |
'''simple docstring'''
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
lowerCamelCase : List[str] = logging.getLogger(__name__)
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
def __init__( self , A , A , A , A=None ) -> Optional[int]:
super().__init__(
UpperCamelCase_ , question_encoder_tokenizer=UpperCamelCase_ , generator_tokenizer=UpperCamelCase_ , index=UpperCamelCase_ , init_retrieval=UpperCamelCase_ , )
snake_case : Optional[int] = None
def UpperCAmelCase ( self , A ) -> Any:
logger.info("""initializing retrieval""" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("""dist initialized""" )
# needs to be set manually
snake_case : int = self._infer_socket_ifname()
# avoid clash with the NCCL port
snake_case : Union[str, Any] = str(distributed_port + 1 )
snake_case : Optional[int] = dist.new_group(ranks=UpperCamelCase_ , backend="""gloo""" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("""dist not initialized / main""" )
self.index.init_index()
# all processes wait untill the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def UpperCAmelCase ( self ) -> Dict:
return dist.get_rank(group=self.process_group ) == 0
def UpperCAmelCase ( self , A , A , A=torch.floataa ) -> Tuple:
snake_case : Union[str, Any] = torch.empty(UpperCamelCase_ , dtype=UpperCamelCase_ )
dist.scatter(UpperCamelCase_ , src=0 , scatter_list=UpperCamelCase_ , group=self.process_group )
return target_tensor
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Union[str, Any] = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
snake_case : List[str] = next((addr for addr in addrs if addr.startswith("""e""" )) , UpperCamelCase_ )
return ifname
def UpperCAmelCase ( self , A , A ) -> Tuple[np.ndarray, List[dict]]:
if not dist.is_initialized():
snake_case : Optional[Any] = self._main_retrieve(UpperCamelCase_ , UpperCamelCase_ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(UpperCamelCase_ )
# distributed training
snake_case : int = dist.get_world_size(group=self.process_group )
# gather logic
snake_case : Any = None
if self._is_main():
snake_case : Optional[int] = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(UpperCamelCase_ )]
dist.gather(torch.tensor(UpperCamelCase_ ) , dst=0 , gather_list=UpperCamelCase_ , group=self.process_group )
# scatter logic
snake_case : Optional[Any] = question_hidden_states.shape[0]
snake_case : Optional[Any] = []
snake_case : int = []
if self._is_main():
assert len(UpperCamelCase_ ) == world_size
snake_case : Any = self._main_retrieve(torch.cat(UpperCamelCase_ ).numpy() , UpperCamelCase_ )
snake_case : int = torch.tensor(UpperCamelCase_ ), torch.tensor(UpperCamelCase_ )
snake_case : str = self._chunk_tensor(UpperCamelCase_ , UpperCamelCase_ )
snake_case : int = self._chunk_tensor(UpperCamelCase_ , UpperCamelCase_ )
snake_case : List[Any] = self._scattered(UpperCamelCase_ , [n_queries, n_docs] , target_type=torch.intaa )
snake_case : List[Any] = self._scattered(UpperCamelCase_ , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(UpperCamelCase_ )
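# Hedged, single-process sketch (added, not part of the retriever above) of the
# gather -> retrieve -> chunk -> scatter pattern it implements: the main rank
# concatenates the per-rank query batches, retrieves once, then splits the
# result back into one chunk per rank. `_chunk_tensor` in the class plays the
# role that `torch.chunk` plays here; no process group is needed to illustrate it.
import torch

world_size = 4
per_rank_queries = [torch.randn(2, 8) for _ in range(world_size)]  # gathered on the main rank
all_queries = torch.cat(per_rank_queries)                          # (8, 8): a single retrieval call
doc_ids = torch.arange(all_queries.shape[0] * 5).view(-1, 5)       # stand-in retrieval result
chunks = torch.chunk(doc_ids, world_size, dim=0)                   # one chunk scattered back per rank
assert all(chunk.shape == (2, 5) for chunk in chunks)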
| 704 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {'vocab_file': 'spm_char.model'}
lowerCamelCase : List[str] = {
'vocab_file': {
'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model',
'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model',
'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model',
}
}
lowerCamelCase : List[Any] = {
'microsoft/speecht5_asr': 1_0_2_4,
'microsoft/speecht5_tts': 1_0_2_4,
'microsoft/speecht5_vc': 1_0_2_4,
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = ["""input_ids""", """attention_mask"""]
def __init__( self , A , A="<s>" , A="</s>" , A="<unk>" , A="<pad>" , A = None , **A , ) -> None:
snake_case : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A , eos_token=A , unk_token=A , pad_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
snake_case : Tuple = vocab_file
snake_case : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A )
@property
def UpperCAmelCase ( self ) -> List[Any]:
return self.sp_model.get_piece_size()
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Any = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> List[str]:
snake_case : Optional[Any] = self.__dict__.copy()
snake_case : Optional[Any] = None
return state
def __setstate__( self , A ) -> Tuple:
snake_case : Any = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
snake_case : List[Any] = {}
snake_case : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase ( self , A ) -> List[str]:
return self.sp_model.encode(A , out_type=A )
def UpperCAmelCase ( self , A ) -> Tuple:
return self.sp_model.piece_to_id(A )
def UpperCAmelCase ( self , A ) -> int:
snake_case : Union[str, Any] = self.sp_model.IdToPiece(A )
return token
def UpperCAmelCase ( self , A ) -> Tuple:
snake_case : Optional[int] = []
snake_case : str = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A ) + token
snake_case : Dict = []
else:
current_sub_tokens.append(A )
out_string += self.sp_model.decode(A )
return out_string.strip()
def UpperCAmelCase ( self , A , A=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCAmelCase ( self , A , A = None , A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
snake_case : Any = [1]
if token_ids_a is None:
return ([0] * len(A )) + suffix_ones
return ([0] * len(A )) + ([0] * len(A )) + suffix_ones
def UpperCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Optional[Any] = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , """wb""" ) as fi:
snake_case : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
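# Hedged usage sketch (added; assumes network access and that the class above
# mirrors transformers' SpeechT5Tokenizer): the character-level SentencePiece
# model encodes text one character per piece and appends </s> via
# build_inputs_with_special_tokens.
from transformers import SpeechT5Tokenizer

tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
ids = tokenizer("hello world").input_ids
print(tokenizer.convert_ids_to_tokens(ids))  # per-character pieces plus the eos token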
| 684 | 0 |
"""simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
UpperCAmelCase =get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
UpperCAmelCase =json.load(f)
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ,lowerCamelCase_ ) -> List[str]:
return FSMTTokenizer.from_pretrained(lowerCamelCase_ )
def UpperCamelCase__ ( self ,lowerCamelCase_ ) -> Optional[Any]:
A = FSMTForConditionalGeneration.from_pretrained(lowerCamelCase_ ).to(lowerCamelCase_ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
["""en-ru""", 26.0],
["""ru-en""", 22.0],
["""en-de""", 22.0],
["""de-en""", 29.0],
] )
@slow
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ) -> Any:
# note: this test is not testing the best performance since it only evals a small batch
# but it should be enough to detect a regression in the output quality
A = f'facebook/wmt19-{pair}'
A = self.get_tokenizer(lowerCamelCase_ )
A = self.get_model(lowerCamelCase_ )
A = bleu_data[pair]["""src"""]
A = bleu_data[pair]["""tgt"""]
A = tokenizer(lowerCamelCase_ ,return_tensors="""pt""" ,truncation=lowerCamelCase_ ,padding="""longest""" ).to(lowerCamelCase_ )
A = model.generate(
input_ids=batch.input_ids ,num_beams=8 ,)
A = tokenizer.batch_decode(
lowerCamelCase_ ,skip_special_tokens=lowerCamelCase_ ,clean_up_tokenization_spaces=lowerCamelCase_ )
A = calculate_bleu(lowerCamelCase_ ,lowerCamelCase_ )
print(lowerCamelCase_ )
self.assertGreaterEqual(scores["""bleu"""] ,lowerCamelCase_ )
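# Hedged sketch (added) of the `calculate_bleu` helper imported above. The real
# helper lives in the examples' utils module; this mirrors its usual
# sacrebleu-based shape and is an assumption, not the verified implementation.
from sacrebleu import corpus_bleu

def calculate_bleu_sketch(output_lns, refs_lns):
    # corpus-level BLEU against a single reference stream, rounded as in the examples utils
    return {"bleu": round(corpus_bleu(output_lns, [refs_lns]).score, 4)}

print(calculate_bleu_sketch(["the cat sat"], ["the cat sat"]))  # {'bleu': 100.0}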
| 617 |
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    """simple docstring"""
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def should_reduce_batch_size(exception: Exception) -> bool:
    """simple docstring"""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False
def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    """simple docstring"""
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)
    batch_size = starting_batch_size
    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f'{arg}={value}' for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f'Batch size was passed into `{function.__name__}` as the first argument when called.'
                f'Remove this as the decorator already does so: `{function.__name__}({arg_str})`')
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise
    return decorator
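# Hedged usage sketch (added) for the decorator above, which mirrors
# `accelerate.utils.find_executable_batch_size`: the wrapped function receives
# the current batch size as its first argument, injected by the decorator, so
# callers must not pass it themselves (the TypeError guard above enforces this);
# on OOM the size is halved and the call retried.
@find_executable_batch_size(starting_batch_size=128)
def train(batch_size, model_name):
    print(f"trying batch_size={batch_size} on {model_name}")

train("demo-model")  # prints: trying batch_size=128 on demo-model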
| 617 | 1 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __snake_case :
def __init__( self, A, A=3, A=32, A=3, A=10, A=[10, 20, 30, 40], A=[1, 1, 2, 1], A=True, A=True, A="relu", A=3, A=None, ):
"""simple docstring"""
lowerCamelCase : str = parent
lowerCamelCase : Optional[Any] = batch_size
lowerCamelCase : Optional[Any] = image_size
lowerCamelCase : Optional[int] = num_channels
lowerCamelCase : List[Any] = embeddings_size
lowerCamelCase : Optional[int] = hidden_sizes
lowerCamelCase : int = depths
lowerCamelCase : List[str] = is_training
lowerCamelCase : Tuple = use_labels
lowerCamelCase : Optional[int] = hidden_act
lowerCamelCase : Optional[Any] = num_labels
lowerCamelCase : Dict = scope
lowerCamelCase : Optional[int] = len(__lowercase )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : Optional[int] = None
if self.use_labels:
lowerCamelCase : Any = ids_tensor([self.batch_size], self.num_labels )
lowerCamelCase : List[str] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size, )
def UpperCAmelCase_ ( self, A, A, A ):
"""simple docstring"""
lowerCamelCase : Tuple = TFResNetModel(config=__lowercase )
lowerCamelCase : Any = model(__lowercase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
def UpperCAmelCase_ ( self, A, A, A ):
"""simple docstring"""
lowerCamelCase : str = self.num_labels
lowerCamelCase : List[str] = TFResNetForImageClassification(__lowercase )
lowerCamelCase : Optional[Any] = model(__lowercase, labels=__lowercase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[Any] = config_and_inputs
lowerCamelCase : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __snake_case ( lowercase__ , lowercase__ , unittest.TestCase):
_lowerCAmelCase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
_lowerCAmelCase = (
{"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification}
if is_tf_available()
else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Tuple = TFResNetModelTester(self )
lowerCamelCase : Optional[Any] = ConfigTester(self, config_class=__lowercase, has_text_modality=__lowercase )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
pass
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : int = model_class(__lowercase )
lowerCamelCase : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : int = [*signature.parameters.keys()]
lowerCamelCase : Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1], __lowercase )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
def check_hidden_states_output(A, A, A ):
lowerCamelCase : Dict = model_class(__lowercase )
lowerCamelCase : Union[str, Any] = model(**self._prepare_for_class(__lowercase, __lowercase ) )
lowerCamelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase : Tuple = self.model_tester.num_stages
self.assertEqual(len(__lowercase ), expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
lowerCamelCase , lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : str = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowerCamelCase : Any = layer_type
lowerCamelCase : Dict = True
check_hidden_states_output(__lowercase, __lowercase, __lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : Dict = True
check_hidden_states_output(__lowercase, __lowercase, __lowercase )
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowercase )
@slow
def UpperCAmelCase_ ( self ):
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : str = TFResNetModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def UpperCAmelCase ( ):
lowerCamelCase : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_tf
@require_vision
class __snake_case ( unittest.TestCase):
@cached_property
def UpperCAmelCase_ ( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Dict = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowerCamelCase : str = self.default_image_processor
lowerCamelCase : List[Any] = prepare_img()
lowerCamelCase : Any = image_processor(images=__lowercase, return_tensors='tf' )
# forward pass
lowerCamelCase : List[str] = model(**__lowercase )
# verify the logits
lowerCamelCase : Optional[int] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape, __lowercase )
lowerCamelCase : List[Any] = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), __lowercase, atol=1e-4 ) )
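# Hedged inference sketch (added) mirroring the integration test above. It
# assumes network access, and "microsoft/resnet-50" is assumed to be the first
# checkpoint in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST.
import tensorflow as tf
from PIL import Image
from transformers import AutoImageProcessor, TFResNetForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs).logits
print(model.config.id2label[int(tf.argmax(logits, axis=-1)[0])])  # e.g. a cat-related class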
| 715 |
'''simple docstring'''
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
ETAOIN = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def get_letter_count(message: str) -> dict:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count
def get_item_at_index_zero(x: tuple) -> str:
    return x[0]
def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = ''.join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    sorted_freq_to_letter = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(sorted_freq_to_letter)
def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
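    # Hedged usage sketch (added) for the helpers above: English-looking text
    # overlaps the ETAOIN ordering at both ends, random-looking text does not.
    print(get_frequency_order('Defend the east wall of the castle'))
    print(english_freq_match_score('Defend the east wall of the castle'))  # int in 0..12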
| 449 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE: Union[str, Any] = {
'''configuration_layoutlmv3''': [
'''LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LayoutLMv3Config''',
'''LayoutLMv3OnnxConfig''',
],
'''processing_layoutlmv3''': ['''LayoutLMv3Processor'''],
'''tokenization_layoutlmv3''': ['''LayoutLMv3Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE: Optional[int] = ['''LayoutLMv3TokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE: int = [
'''LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv3ForQuestionAnswering''',
'''LayoutLMv3ForSequenceClassification''',
'''LayoutLMv3ForTokenClassification''',
'''LayoutLMv3Model''',
'''LayoutLMv3PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE: Optional[Any] = [
'''TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLayoutLMv3ForQuestionAnswering''',
'''TFLayoutLMv3ForSequenceClassification''',
'''TFLayoutLMv3ForTokenClassification''',
'''TFLayoutLMv3Model''',
'''TFLayoutLMv3PreTrainedModel''',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE: Dict = ['''LayoutLMv3FeatureExtractor''']
SCREAMING_SNAKE_CASE: Any = ['''LayoutLMv3ImageProcessor''']
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
    SCREAMING_SNAKE_CASE: List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 360 |
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class lowercase_ :
lowerCAmelCase__ =42 # [batch_size x 3]
lowerCAmelCase__ =42 # [batch_size x 3]
lowerCAmelCase__ =42 # [batch_size x 3]
lowerCAmelCase__ =42 # [batch_size x 3]
lowerCAmelCase__ =42
lowerCAmelCase__ =42
lowerCAmelCase__ =42
lowerCAmelCase__ =42
lowerCAmelCase__ =42
def __a ( self : Optional[Any] ):
"""simple docstring"""
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def __a ( self : Union[str, Any] ):
"""simple docstring"""
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def __a ( self : Optional[int] ):
"""simple docstring"""
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def __a ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = torch.arange(self.height * self.width )
SCREAMING_SNAKE_CASE_ = torch.stack(
[
pixel_indices % self.width,
torch.div(snake_case__ , self.width , rounding_mode='trunc' ),
] , axis=1 , )
return coords
@property
def __a ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ = self.shape
SCREAMING_SNAKE_CASE_ = int(np.prod(snake_case__ ) )
SCREAMING_SNAKE_CASE_ = self.get_image_coords()
SCREAMING_SNAKE_CASE_ = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
SCREAMING_SNAKE_CASE_ = self.get_camera_rays(snake_case__ )
SCREAMING_SNAKE_CASE_ = rays.view(snake_case__ , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def __a ( self : Optional[Any] , snake_case__ : torch.Tensor ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
SCREAMING_SNAKE_CASE_ = coords.view(snake_case__ , -1 , 2 )
SCREAMING_SNAKE_CASE_ = self.resolution()
SCREAMING_SNAKE_CASE_ = self.fov()
SCREAMING_SNAKE_CASE_ = (flat.float() / (res - 1)) * 2 - 1
SCREAMING_SNAKE_CASE_ = fracs * torch.tan(fov / 2 )
SCREAMING_SNAKE_CASE_ = fracs.view(snake_case__ , -1 , 2 )
SCREAMING_SNAKE_CASE_ = (
self.z.view(snake_case__ , 1 , 3 )
+ self.x.view(snake_case__ , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(snake_case__ , 1 , 3 ) * fracs[:, :, 1:]
)
SCREAMING_SNAKE_CASE_ = directions / directions.norm(dim=-1 , keepdim=snake_case__ )
SCREAMING_SNAKE_CASE_ = torch.stack(
[
torch.broadcast_to(self.origin.view(snake_case__ , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(snake_case__ , *snake_case__ , 2 , 3 )
def __a ( self : Optional[int] , snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=snake_case__ , height=snake_case__ , x_fov=self.x_fov , y_fov=self.y_fov , )
def _a ( lowerCAmelCase )-> DifferentiableProjectiveCamera:
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
SCREAMING_SNAKE_CASE_ = np.array([np.sin(lowerCAmelCase ), np.cos(lowerCAmelCase ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
SCREAMING_SNAKE_CASE_ = -z * 4
SCREAMING_SNAKE_CASE_ = np.array([np.cos(lowerCAmelCase ), -np.sin(lowerCAmelCase ), 0.0] )
SCREAMING_SNAKE_CASE_ = np.cross(lowerCAmelCase , lowerCAmelCase )
origins.append(lowerCAmelCase )
xs.append(lowerCAmelCase )
ys.append(lowerCAmelCase )
zs.append(lowerCAmelCase )
return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(lowerCAmelCase , axis=0 ) ).float() , x=torch.from_numpy(np.stack(lowerCAmelCase , axis=0 ) ).float() , y=torch.from_numpy(np.stack(lowerCAmelCase , axis=0 ) ).float() , z=torch.from_numpy(np.stack(lowerCAmelCase , axis=0 ) ).float() , width=lowerCAmelCase , height=lowerCAmelCase , x_fov=0.7 , y_fov=0.7 , shape=(1, len(lowerCAmelCase )) , )
| 360 | 1 |
def solution(limit: int = 1_000_000) -> int:
    """simple docstring"""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # z > 0 needs a > d, and n > 0 needs a < 4d
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
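# Hedged derivation note (added) for the loop above (Project Euler 135): write
# the progression as x = a + d, y = a, z = a - d with a = first_term. Then
#   x**2 - y**2 - z**2 = (a + d)**2 - a**2 - (a - d)**2 = 4*a*d - a**2 = a*(4*d - a)
# so n = a*(4*d - a) and first_term + n / first_term = 4*d, which is why
# `common_difference` must be divisible by 4, and why z > 0 and n > 0 translate
# to the range check d < a < 4*d.
assert all(
    (a + d) ** 2 - a**2 - (a - d) ** 2 == a * (4 * d - a)
    for a in range(1, 9)
    for d in range(1, 9)
)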
if __name__ == "__main__":
print(F"""{solution() = }""")
| 127 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowercase_: Optional[int] = 'src/diffusers'
lowercase_: List[str] = '.'
# This is to make sure the diffusers module imported is the one in the repo.
lowercase_: List[str] = importlib.util.spec_from_file_location(
'diffusers',
os.path.join(DIFFUSERS_PATH, '__init__.py'),
submodule_search_locations=[DIFFUSERS_PATH],
)
lowercase_: Optional[int] = spec.loader.load_module()
def _lowercase ( UpperCAmelCase_ , UpperCAmelCase_):
"""simple docstring"""
return line.startswith(UpperCAmelCase_) or len(UpperCAmelCase_) <= 1 or re.search(R"""^\s*\)(\s*->.*:|:)\s*$""" , UpperCAmelCase_) is not None
def _lowercase ( UpperCAmelCase_):
"""simple docstring"""
snake_case__ : int = object_name.split(""".""")
snake_case__ : int = 0
# First let's find the module where our object lives.
snake_case__ : Tuple = parts[i]
while i < len(UpperCAmelCase_) and not os.path.isfile(os.path.join(UpperCAmelCase_ , F'{module}.py')):
i += 1
if i < len(UpperCAmelCase_):
snake_case__ : List[str] = os.path.join(UpperCAmelCase_ , parts[i])
if i >= len(UpperCAmelCase_):
raise ValueError(F'`object_name` should begin with the name of a module of diffusers but got {object_name}.')
with open(os.path.join(UpperCAmelCase_ , F'{module}.py') , """r""" , encoding="""utf-8""" , newline="""\n""") as f:
snake_case__ : Any = f.readlines()
# Now let's find the class / func in the code!
snake_case__ : Optional[Any] = """"""
snake_case__ : Dict = 0
for name in parts[i + 1 :]:
while (
line_index < len(UpperCAmelCase_) and re.search(RF'^{indent}(class|def)\s+{name}(\(|\:)' , lines[line_index]) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(UpperCAmelCase_):
raise ValueError(F' {object_name} does not match any function or class in {module}.')
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
snake_case__ : Dict = line_index
while line_index < len(UpperCAmelCase_) and _should_continue(lines[line_index] , UpperCAmelCase_):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1]) <= 1:
line_index -= 1
snake_case__ : int = lines[start_index:line_index]
return "".join(UpperCAmelCase_)
lowercase_: str = re.compile(r'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
lowercase_: List[str] = re.compile(r'^\s*(\S+)->(\S+)(\s+.*|$)')
lowercase_: Tuple = re.compile(r'<FILL\s+[^>]*>')
def _lowercase ( UpperCAmelCase_):
"""simple docstring"""
snake_case__ : List[Any] = code.split("""\n""")
snake_case__ : Any = 0
while idx < len(UpperCAmelCase_) and len(lines[idx]) == 0:
idx += 1
if idx < len(UpperCAmelCase_):
return re.search(R"""^(\s*)\S""" , lines[idx]).groups()[0]
return ""
def _lowercase ( UpperCAmelCase_):
"""simple docstring"""
snake_case__ : str = len(get_indent(UpperCAmelCase_)) > 0
if has_indent:
snake_case__ : Optional[Any] = F'class Bla:\n{code}'
snake_case__ : Union[str, Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=UpperCAmelCase_)
snake_case__ : Union[str, Any] = black.format_str(UpperCAmelCase_ , mode=UpperCAmelCase_)
snake_case__ , snake_case__ : Union[str, Any] = style_docstrings_in_code(UpperCAmelCase_)
return result[len("""class Bla:\n""") :] if has_indent else result
def _lowercase ( UpperCAmelCase_ , UpperCAmelCase_=False):
"""simple docstring"""
with open(UpperCAmelCase_ , """r""" , encoding="""utf-8""" , newline="""\n""") as f:
snake_case__ : Dict = f.readlines()
snake_case__ : Dict = []
snake_case__ : str = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(UpperCAmelCase_):
snake_case__ : Union[str, Any] = _re_copy_warning.search(lines[line_index])
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
snake_case__ , snake_case__ , snake_case__ : Tuple = search.groups()
snake_case__ : Optional[int] = find_code_in_diffusers(UpperCAmelCase_)
snake_case__ : Any = get_indent(UpperCAmelCase_)
snake_case__ : Any = line_index + 1 if indent == theoretical_indent else line_index + 2
snake_case__ : Optional[int] = theoretical_indent
snake_case__ : int = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
snake_case__ : Any = True
while line_index < len(UpperCAmelCase_) and should_continue:
line_index += 1
if line_index >= len(UpperCAmelCase_):
break
snake_case__ : Any = lines[line_index]
snake_case__ : Tuple = _should_continue(UpperCAmelCase_ , UpperCAmelCase_) and re.search(F'^{indent}# End copy' , UpperCAmelCase_) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1]) <= 1:
line_index -= 1
snake_case__ : Union[str, Any] = lines[start_index:line_index]
snake_case__ : Any = """""".join(UpperCAmelCase_)
# Remove any nested `Copied from` comments to avoid circular copies
snake_case__ : Union[str, Any] = [line for line in theoretical_code.split("""\n""") if _re_copy_warning.search(UpperCAmelCase_) is None]
snake_case__ : Optional[Any] = """\n""".join(UpperCAmelCase_)
# Before comparing, use the `replace_pattern` on the original code.
if len(UpperCAmelCase_) > 0:
snake_case__ : List[Any] = replace_pattern.replace("""with""" , """""").split(""",""")
snake_case__ : int = [_re_replace_pattern.search(UpperCAmelCase_) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
snake_case__ , snake_case__ , snake_case__ : List[str] = pattern.groups()
snake_case__ : str = re.sub(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
if option.strip() == "all-casing":
snake_case__ : Tuple = re.sub(obja.lower() , obja.lower() , UpperCAmelCase_)
snake_case__ : Tuple = re.sub(obja.upper() , obja.upper() , UpperCAmelCase_)
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
snake_case__ : Optional[int] = blackify(lines[start_index - 1] + theoretical_code)
snake_case__ : Dict = theoretical_code[len(lines[start_index - 1]) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index])
if overwrite:
snake_case__ : str = lines[:start_index] + [theoretical_code] + lines[line_index:]
snake_case__ : str = start_index + 1
if overwrite and len(UpperCAmelCase_) > 0:
# Warn the user a file has been modified.
print(F'Detected changes, rewriting {filename}.')
with open(UpperCAmelCase_ , """w""" , encoding="""utf-8""" , newline="""\n""") as f:
f.writelines(UpperCAmelCase_)
return diffs
def _lowercase ( UpperCAmelCase_ = False):
"""simple docstring"""
snake_case__ : int = glob.glob(os.path.join(UpperCAmelCase_ , """**/*.py""") , recursive=UpperCAmelCase_)
snake_case__ : int = []
for filename in all_files:
snake_case__ : Tuple = is_copy_consistent(UpperCAmelCase_ , UpperCAmelCase_)
diffs += [F'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs]
if not overwrite and len(UpperCAmelCase_) > 0:
snake_case__ : Tuple = """\n""".join(UpperCAmelCase_)
raise Exception(
"""Found the following copy inconsistencies:\n"""
+ diff
+ """\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.""")
if __name__ == "__main__":
lowercase_: Optional[Any] = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
lowercase_: Optional[Any] = parser.parse_args()
check_copies(args.fix_and_overwrite)
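# Hedged illustration (added, hypothetical names) of the convention this script
# enforces: a `# Copied from` comment marks a block that must stay identical to
# its source in `src/diffusers`, optionally with `with Old->New` rename patterns
# that `_re_replace_pattern` above applies before comparing.
#
# Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock
# class MyBlock:
#     ...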
| 127 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _lowercase ( __lowercase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : str = TextToVideoSDPipeline
_SCREAMING_SNAKE_CASE : Tuple = TEXT_TO_IMAGE_PARAMS
_SCREAMING_SNAKE_CASE : List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
_SCREAMING_SNAKE_CASE : List[str] = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
def a ( self : Union[str, Any] ) -> Dict:
torch.manual_seed(0 )
__snake_case = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=32 , attention_head_dim=4 , )
__snake_case = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=SCREAMING_SNAKE_CASE_ , set_alpha_to_one=SCREAMING_SNAKE_CASE_ , )
torch.manual_seed(0 )
__snake_case = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
__snake_case = CLIPTextModel(SCREAMING_SNAKE_CASE_ )
__snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__snake_case = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def a ( self : Any , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0 ) -> Union[str, Any]:
if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ):
__snake_case = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
__snake_case = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
__snake_case = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def a ( self : Any ) -> Union[str, Any]:
__snake_case = 'cpu' # ensure determinism for the device-dependent torch.Generator
__snake_case = self.get_dummy_components()
__snake_case = TextToVideoSDPipeline(**SCREAMING_SNAKE_CASE_ )
__snake_case = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
__snake_case = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
__snake_case = 'np'
__snake_case = sd_pipe(**SCREAMING_SNAKE_CASE_ ).frames
__snake_case = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
__snake_case = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def a ( self : Optional[int] ) -> str:
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE_ , expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def a ( self : Any ) -> int:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE_ , expected_max_diff=1e-2 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def a ( self : List[str] ) -> Optional[int]:
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def a ( self : Optional[int] ) -> Any:
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def a ( self : List[Any] ) -> List[str]:
pass
def a ( self : Dict ) -> Optional[int]:
return super().test_progress_bar()
@slow
@skip_mps
class _lowercase ( unittest.TestCase ):
def a ( self : List[Any] ) -> str:
__snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy' )
__snake_case = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
__snake_case = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
__snake_case = pipe.to('cuda' )
__snake_case = 'Spiderman is surfing'
__snake_case = torch.Generator(device='cpu' ).manual_seed(0 )
__snake_case = pipe(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=25 , output_type='pt' ).frames
__snake_case = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def a ( self : Dict ) -> Any:
__snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy' )
__snake_case = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
__snake_case = pipe.to('cuda' )
__snake_case = 'Spiderman is surfing'
__snake_case = torch.Generator(device='cpu' ).manual_seed(0 )
__snake_case = pipe(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=2 , output_type='pt' ).frames
__snake_case = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 56 |
"""simple docstring"""
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
snake_case_ : Any = """."""
if __name__ == "__main__":
snake_case_ : List[str] = os.path.join(REPO_PATH, """utils/documentation_tests.txt""")
snake_case_ : Any = []
snake_case_ : Tuple = []
with open(doctest_file_path) as fp:
for line in fp:
snake_case_ : List[Any] = line.strip()
snake_case_ : List[Any] = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
snake_case_ : Union[str, Any] = """\n""".join(non_existent_paths)
raise ValueError(f'''`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}''')
if all_paths != sorted(all_paths):
raise ValueError("""Files in `utils/documentation_tests.txt` are not in alphabetical order.""")
| 595 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowercase_ : List[str] = {'''configuration_speech_encoder_decoder''': ['''SpeechEncoderDecoderConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : int = ['''SpeechEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Dict = ['''FlaxSpeechEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
lowercase_ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 706 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase_ : Optional[Any] = {
'''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''],
'''tokenization_roc_bert''': ['''RoCBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    pass  # RoCBert ships no fast tokenizer, so no extra names are registered here
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Tuple = [
'''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoCBertForCausalLM''',
'''RoCBertForMaskedLM''',
'''RoCBertForMultipleChoice''',
'''RoCBertForPreTraining''',
'''RoCBertForQuestionAnswering''',
'''RoCBertForSequenceClassification''',
'''RoCBertForTokenClassification''',
'''RoCBertLayer''',
'''RoCBertModel''',
'''RoCBertPreTrainedModel''',
'''load_tf_weights_in_roc_bert''',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass  # RoCBert ships no fast tokenizer; raising here would break imports whenever tokenizers is installed
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
lowercase_ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 652 | 0 |
'''simple docstring'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
__lowerCamelCase : List[str] = logging.get_logger(__name__)
class UpperCAmelCase ( _lowercase ):
UpperCAmelCase : Tuple = '''vision-encoder-decoder'''
UpperCAmelCase : List[Any] = True
def __init__(self : List[Any] , **A__ : List[str] ) -> Optional[int]:
super().__init__(**A__ )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
f'A configuraton of type {self.model_type} cannot be instantiated because '
f'not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}' )
lowercase = kwargs.pop("encoder" )
lowercase = encoder_config.pop("model_type" )
lowercase = kwargs.pop("decoder" )
lowercase = decoder_config.pop("model_type" )
lowercase = AutoConfig.for_model(A__ , **A__ )
lowercase = AutoConfig.for_model(A__ , **A__ )
lowercase = True
@classmethod
def UpperCAmelCase__ (cls : int , A__ : PretrainedConfig , A__ : PretrainedConfig , **A__ : str ) -> PretrainedConfig:
logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config" )
lowercase = True
lowercase = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **A__ )
def UpperCAmelCase__ (self : List[Any] ) -> Union[str, Any]:
lowercase = copy.deepcopy(self.__dict__ )
lowercase = self.encoder.to_dict()
lowercase = self.decoder.to_dict()
lowercase = self.__class__.model_type
return output
class UpperCAmelCase ( _lowercase ):
UpperCAmelCase : Dict = version.parse('''1.11''' )
@property
def UpperCAmelCase__ (self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def UpperCAmelCase__ (self : List[Any] ) -> float:
return 1e-4
@property
def UpperCAmelCase__ (self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}} )
class UpperCAmelCase ( _lowercase ):
@property
def UpperCAmelCase__ (self : Dict ) -> Mapping[str, Mapping[int, str]]:
lowercase = OrderedDict()
lowercase = {0: "batch", 1: "past_decoder_sequence + sequence"}
lowercase = {0: "batch", 1: "past_decoder_sequence + sequence"}
lowercase = {0: "batch", 1: "encoder_sequence"}
return common_inputs
def UpperCAmelCase__ (self : Any , A__ : "PreTrainedTokenizerBase" , A__ : int = -1 , A__ : int = -1 , A__ : bool = False , A__ : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
import torch
lowercase = OrderedDict()
lowercase = super().generate_dummy_inputs(
A__ , batch_size=A__ , seq_length=A__ , is_pair=A__ , framework=A__ )
lowercase , lowercase = dummy_input["input_ids"].shape
lowercase = (batch, encoder_sequence, self._config.encoder_hidden_size)
lowercase = dummy_input.pop("input_ids" )
lowercase = dummy_input.pop("attention_mask" )
lowercase = torch.zeros(A__ )
return common_inputs
class UpperCAmelCase ( _lowercase ):
@property
def UpperCAmelCase__ (self : Any ) -> None:
pass
def UpperCAmelCase__ (self : List[str] , A__ : PretrainedConfig ) -> OnnxConfig:
return VisionEncoderDecoderEncoderOnnxConfig(A__ )
def UpperCAmelCase__ (self : List[Any] , A__ : PretrainedConfig , A__ : PretrainedConfig , A__ : str = "default" ) -> OnnxConfig:
lowercase = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(A__ , A__ )
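# Hedged usage sketch (added) for the config class above, assuming it mirrors
# transformers' VisionEncoderDecoderConfig; bare configs need no network access.
from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig

config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(
    ViTConfig(), BertConfig()
)
# the classmethod above flips these two flags on the decoder config
assert config.decoder.is_decoder and config.decoder.add_cross_attention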
| 310 |
'''simple docstring'''
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    """simple docstring"""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b
def solution(n: int = 1000) -> int:
    """simple docstring"""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
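# Hedged worked example (added): with n = 3 the generator yields 1, 2, 3, 5, 8,
# 13, 21, 34, 55, 89 (ten terms under three digits), then stops at 144, and the
# function returns 11 + 1 = 12 -- indeed F(12) = 144 is the first three-digit
# Fibonacci number (with F(1) = F(2) = 1).
assert solution(3) == 12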
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 310 | 1 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
__lowercase =[1_0, 2_0, 3_0, 4_0, 5_0, 6_0]
__lowercase =[2, 4, 6, 8, 1_0, 1_2]
__lowercase =1_0_0
self.assertEqual(kp.calc_profit(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase) , 2_1_0)
def __lowerCamelCase ( self : int):
'''simple docstring'''
self.assertRaisesRegex(_lowerCAmelCase , 'max_weight must greater than zero.')
def __lowerCamelCase ( self : Any):
'''simple docstring'''
self.assertRaisesRegex(_lowerCAmelCase , 'Weight can not be negative.')
def __lowerCamelCase ( self : int):
'''simple docstring'''
self.assertRaisesRegex(_lowerCAmelCase , 'Profit can not be negative.')
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
self.assertRaisesRegex(_lowerCAmelCase , 'max_weight must greater than zero.')
def __lowerCamelCase ( self : int):
'''simple docstring'''
self.assertRaisesRegex(
_lowerCAmelCase , 'The length of profit and weight must be same.')
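# Hedged sketch (added, hypothetical reimplementation, not the imported module)
# of the greedy helper exercised by the first test above: take items in
# descending profit/weight ratio until max_weight is filled, taking a fraction
# of the item that no longer fits.
def calc_profit_sketch(profit, weight, max_weight):
    by_ratio = sorted(range(len(profit)), key=lambda i: profit[i] / weight[i], reverse=True)
    total, remaining = 0.0, max_weight
    for i in by_ratio:
        if weight[i] <= remaining:
            remaining -= weight[i]
            total += profit[i]
        else:
            total += profit[i] * (remaining / weight[i])
            break
    return total

# all items fit (total weight 42 <= 100), matching the expected 210 above
assert calc_profit_sketch([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100) == 210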
if __name__ == "__main__":
unittest.main()
| 454 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCamelCase ( A ):
'''simple docstring'''
lowerCAmelCase__ = ["""image_processor""", """tokenizer"""]
lowerCAmelCase__ = """ViltImageProcessor"""
lowerCAmelCase__ = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : str , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : int=None , **_lowerCAmelCase : Optional[Any]):
'''simple docstring'''
__lowercase =None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _lowerCAmelCase , )
__lowercase =kwargs.pop('feature_extractor')
__lowercase =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.')
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.')
super().__init__(_lowerCAmelCase , _lowerCAmelCase)
__lowercase =self.image_processor
def __call__( self : List[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _lowerCAmelCase : bool = True , _lowerCAmelCase : Union[bool, str, PaddingStrategy] = False , _lowerCAmelCase : Union[bool, str, TruncationStrategy] = None , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : int = 0 , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = True , _lowerCAmelCase : Optional[Union[str, TensorType]] = None , **_lowerCAmelCase : Any , ):
'''simple docstring'''
__lowercase =self.tokenizer(
text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , stride=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , return_overflowing_tokens=_lowerCAmelCase , return_special_tokens_mask=_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , return_length=_lowerCAmelCase , verbose=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase , )
# add pixel_values + pixel_mask
__lowercase =self.image_processor(_lowerCAmelCase , return_tensors=_lowerCAmelCase)
encoding.update(_lowerCAmelCase)
return encoding
def __lowerCamelCase ( self : List[Any] , *_lowerCAmelCase : str , **_lowerCAmelCase : Union[str, Any]):
'''simple docstring'''
return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase)
def __lowerCamelCase ( self : Union[str, Any] , *_lowerCAmelCase : str , **_lowerCAmelCase : Optional[Any]):
'''simple docstring'''
return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase)
@property
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__lowercase =self.tokenizer.model_input_names
__lowercase =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
@property
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _lowerCAmelCase , )
return self.image_processor_class
@property
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _lowerCAmelCase , )
return self.image_processor
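# Hedged usage sketch (added; assumes network access and PIL, and that the class
# above is the transformers ViltProcessor):
from PIL import Image
from transformers import ViltProcessor

processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
encoding = processor(image, "How many cats are there?", return_tensors="pt")
print(encoding.keys())  # input_ids/attention_mask plus pixel_values (and pixel_mask)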
| 454 | 1 |
"""simple docstring"""
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class __UpperCamelCase :
def __init__( self : str , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : int ) -> Tuple:
        if dst_width <= 0 or dst_height <= 0:
raise ValueError('Destination width/height should be > 0' )
lowerCAmelCase :int = img
lowerCAmelCase :Optional[Any] = img.shape[1]
lowerCAmelCase :str = img.shape[0]
lowerCAmelCase :str = dst_width
lowerCAmelCase :int = dst_height
lowerCAmelCase :Any = self.src_w / self.dst_w
lowerCAmelCase :str = self.src_h / self.dst_h
lowerCAmelCase :Tuple = (
np.ones((self.dst_h, self.dst_w, 3) , np.uinta ) * 255
)
def UpperCAmelCase__ ( self : Optional[int] ) -> str:
for i in range(self.dst_h ):
for j in range(self.dst_w ):
lowerCAmelCase :Dict = self.img[self.get_y(__lowerCamelCase )][self.get_x(__lowerCamelCase )]
def UpperCAmelCase__ ( self : Union[str, Any] , UpperCAmelCase : int ) -> Optional[Any]:
return int(self.ratio_x * x )
def UpperCAmelCase__ ( self : List[Any] , UpperCAmelCase : int ) -> List[Any]:
return int(self.ratio_y * y )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE = 8_00, 6_00
__SCREAMING_SNAKE_CASE = imread('image_data/lena.jpg', 1)
__SCREAMING_SNAKE_CASE = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F"""Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}""", n.output
)
waitKey(0)
destroyAllWindows() | 553 |
"""simple docstring"""
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list = None) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    strand_sort(arr, reverse, solution)
    return solution
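# Hedged trace (added) for the function above: strand_sort([4, 3, 5, 1, 2])
# first peels the ascending strand [4, 5], merges it into the empty solution,
# then peels [3] and finally [1, 2], merging each strand -> [1, 2, 3, 4, 5].
# Duplicates survive the merge:
assert strand_sort([2, 2, 1]) == [1, 2, 2]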
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 103 | 0 |
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.txt''',
'''merges_file''': '''bpe.codes''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt''',
},
'''merges_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''vinai/phobert-base''': 2_5_6,
'''vinai/phobert-large''': 2_5_6,
}
def get_pairs(word):
    """
    Return set of symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    return pairs
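# Illustration (added): for the symbol tuple ("l", "o", "w", "</w>") the
# adjacent-pair set below is what drives the next BPE merge choice.
assert get_pairs(("l", "o", "w", "</w>")) == {("l", "o"), ("o", "w"), ("w", "</w>")}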
class PhobertTokenizer(PreTrainedTokenizer):
    """
    Construct a PhoBERT tokenizer, using Byte-Pair-Encoding.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.merges_file = merges_file

        # fairseq dictionaries reserve the first four ids for special tokens
        self.encoder = {}
        self.encoder[str(bos_token)] = 0
        self.encoder[str(pad_token)] = 1
        self.encoder[str(eos_token)] = 2
        self.encoder[str(unk_token)] = 3

        self.add_from_file(vocab_file)

        self.decoder = {v: k for k, v in self.encoder.items()}

        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """
        Build model inputs from a sequence or a pair of sequences. A PhoBERT sequence has the format:

        - single sequence: `<s> X </s>`
        - pair of sequences: `<s> A </s></s> B </s>`
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # merge the highest-ranked (lowest index) adjacent pair first
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)

        return out_vocab_file, out_merge_file

    def add_from_file(self, f):
        """
        Loads a pre-existing dictionary from a text file and adds its symbols to this instance.
        """
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
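# Worked illustration (added, with a made-up merge table): the merge loop in
# `bpe` repeatedly fuses the highest-ranked adjacent pair until no ranked pair
# remains. With ranks {("h","i"): 0, ("hi","</w>"): 1}, the token "hi" evolves
# ("h","i","</w>") -> ("hi","</w>") -> ("hi</w>",).
toy_ranks = {("h", "i"): 0, ("hi", "</w>"): 1}
toy_word = ("h", "i", "</w>")
while True:
    ranked = [p for p in get_pairs(toy_word) if p in toy_ranks]
    if not ranked:
        break
    first, second = min(ranked, key=toy_ranks.get)
    merged, i = [], 0
    while i < len(toy_word):
        if i < len(toy_word) - 1 and (toy_word[i], toy_word[i + 1]) == (first, second):
            merged.append(first + second)
            i += 2
        else:
            merged.append(toy_word[i])
            i += 1
    toy_word = tuple(merged)
assert toy_word == ("hi</w>",)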
| 718 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 81 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}
class Swin2SRConfig(PretrainedConfig):
    """
    Configuration class to store the configuration of a Swin2SR model.
    """

    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
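# Usage sketch (added; assumes a transformers install): the attribute_map lets
# generic code read `hidden_size` while the config actually stores `embed_dim`.
config = Swin2SRConfig(upscale=4)
assert config.hidden_size == config.embed_dim == 180
assert config.num_layers == len(config.depths) == 6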
| 55 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_luke''': ['''LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LukeConfig'''],
'''tokenization_luke''': ['''LukeTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
'''LUKE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LukeForEntityClassification''',
'''LukeForEntityPairClassification''',
'''LukeForEntitySpanClassification''',
'''LukeForMultipleChoice''',
'''LukeForQuestionAnswering''',
'''LukeForSequenceClassification''',
'''LukeForTokenClassification''',
'''LukeForMaskedLM''',
'''LukeModel''',
'''LukePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 427 | 0 |
def euclidean_distance_sqr(point1, point2):
    """Squared euclidean distance, to avoid taking square roots in comparisons."""
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda point: point[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """Brute force O(n^2): only used on small inputs (the recursion base case)."""
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """Closest pair inside the strip around the dividing line; each point is
    compared against at most 6 neighbours."""
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    """Divide and conquer: O(n log n) overall."""
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    # candidates closer to the dividing line than the best distance so far
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)

    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
| 70 | from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPTaConfig)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPTaLMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPTaLMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TaConfig)

            model = TFAutoModelForSeqaSeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeqaSeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTaForConditionalGeneration)

            model = AutoModelForSeqaSeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeqaSeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TaForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
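# Minimal sketch (added) of the PT <-> TF round trip exercised above; assumes
# both torch and tensorflow are installed, and that `model_name` points at a
# small checkpoint (the name below is illustrative only).
import tempfile


def pt_to_tf_roundtrip(model_name="bert-base-uncased"):
    pt_model = AutoModel.from_pretrained(model_name)
    with tempfile.TemporaryDirectory() as tmp_dir:
        pt_model.save_pretrained(tmp_dir)
        # from_pt=True converts the saved PyTorch weights into the TF model
        return TFAutoModel.from_pretrained(tmp_dir, from_pt=True)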
| 70 | 1 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer


MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True


@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        s2s_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        s2s_model.load_state_dict(save_dict["model"])
        _ = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)


@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)


@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()


def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples


def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index_flat, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list


@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    # `support_list` is resolved from the UI scope at call time (kept as-is)
    return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
SCREAMING_SNAKE_CASE : List[Any] = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
SCREAMING_SNAKE_CASE : Dict = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
SCREAMING_SNAKE_CASE : str = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
SCREAMING_SNAKE_CASE : Union[str, Any] = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
SCREAMING_SNAKE_CASE : str = st.sidebar.checkbox("Demo options")
if demo_options:
SCREAMING_SNAKE_CASE : Union[str, Any] = st.sidebar.selectbox(
"",
action_list,
index=3,
)
SCREAMING_SNAKE_CASE : Any = action_list.index(action_st)
SCREAMING_SNAKE_CASE : Union[str, Any] = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
SCREAMING_SNAKE_CASE : Union[str, Any] = show_type == "Show full text of passages"
else:
SCREAMING_SNAKE_CASE : Any = 3
SCREAMING_SNAKE_CASE : List[Any] = True
retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s

if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
SCREAMING_SNAKE_CASE : int = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
SCREAMING_SNAKE_CASE : str = res[1].strip()
if sec_titles == "":
SCREAMING_SNAKE_CASE : Optional[int] = "[{}]({})".format(res[0], wiki_url)
else:
SCREAMING_SNAKE_CASE : Dict = sec_titles.split(" & ")
SCREAMING_SNAKE_CASE : Union[str, Any] = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
        answers_st = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
SCREAMING_SNAKE_CASE : str = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 89 |
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()


metric = load("accuracy")


def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)


class CustomCallback(TrainerCallback):
    """Evaluates on the training set at each evaluation step, so train and
    eval metrics can be compared epoch by epoch."""

    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy


def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
main()
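    # Quick illustration (added): ClassLabel.str2int is what maps the string
    # complexity classes to the integer ids the classification head predicts.
    _demo_labels = ClassLabel(num_classes=3, names=["constant", "linear", "quadratic"])
    assert _demo_labels.str2int("linear") == 1
    assert _demo_labels.int2str(2) == "quadratic"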
| 456 | 0 |
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
| 247 |
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """
    Apply Coulomb's law F = k * |q1 * q2| / d^2: pass exactly one of the four
    quantities as 0 and the function solves for it.
    """
    charge_product = abs(charge1 * charge2)

    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
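    # Worked check (added): two 1 C charges held 1 m apart repel with
    # F = k * |q1*q2| / r^2 = 8.988e9 N.
    assert coulombs_law(force=0, charge1=1, charge2=1, distance=1) == {"force": 8.988e9}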
| 247 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 192 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}


class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range | 192 | 1 |
def base16_encode(data: bytes) -> str:
    # Turn each byte into its two-character uppercase hex representation
    # (uppercase per RFC 3548 section 6).
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC 3548
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
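# Round-trip illustration (added):
assert base16_encode(b"Hello") == "48656C6C6F"
assert base16_decode("48656C6C6F") == b"Hello"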
if __name__ == "__main__":
import doctest
doctest.testmod() | 708 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    # two locks on the same file: the second acquire must time out
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    # over-long lock file names are shortened to fit filesystem limits
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock2.acquire():
        with pytest.raises(Timeout):
            lock1.acquire(0)
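# Usage sketch (added, hypothetical file names): the same lock type can
# serialize writers across processes on a shared filesystem.
def append_safely(path: str, text: str) -> None:
    lock = FileLock(path + ".lock")
    with lock:  # blocks until the lock is free
        with open(path, "a") as f:
            f.write(text)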
| 235 | 0 |
from math import ceil
def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
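# Worked example (added): 12 layers spread over 2 devices -> 6 contiguous
# blocks per device.
assert get_device_map(12, [0, 1]) == {0: [0, 1, 2, 3, 4, 5], 1: [6, 7, 8, 9, 10, 11]}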
| 381 |
def text_justification(word: str, max_width: int) -> list:
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
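    # Example (added): every returned line is padded to exactly max_width chars.
    lines = text_justification("This is an example of text justification.", 16)
    assert lines == ["This    is    an", "example  of text", "justification.  "]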
| 381 | 1 |
"""simple docstring"""
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class BenchmarkArguments:
    """
    BenchmarkArguments are arguments we use in our benchmark scripts **which relate to the training loop itself**.
    """

    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )

    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )

    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )

    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )

    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']."
            )
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:  # `is_tpu` is provided by the framework-specific subclass
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
| 709 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''huggingface/time-series-transformer-tourism-monthly''': (
'''https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'''
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
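# Usage sketch (illustrative): build a config and inspect the derived input width.
# `prediction_length` is the only argument without a usable default here.
#
#   config = TimeSeriesTransformerConfig(prediction_length=24, num_time_features=2)
#   print(config.feature_size)  # input_size * len(lags_sequence) + _number_of_features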
| 404 | 0 |
from math import sqrt

import numpy as np
from sympy import symbols

# Coefficient
# Speed of light (m/s)
c = 299792458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    return 1 / sqrt(1 - beta(velocity) ** 2)
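# Worked value (illustrative): at 99% of light speed the Lorentz factor is
# gamma(0.99 * c) = 1 / sqrt(1 - 0.99**2) ≈ 7.089.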
def transformation_matrix(velocity: float) -> np.ndarray:
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29979245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
| 170 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
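# Lazy-import note (illustrative; assumes this file is a transformers model
# `__init__.py`): the module body above only builds `_import_structure`, and the
# first attribute access, e.g. `from transformers.models.biogpt import BioGptModel`,
# makes `_LazyModule` import `modeling_biogpt` on demand rather than at import time.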
| 170 | 1 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    """A single training/test example for token classification."""

    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes outputs nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
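# Usage sketch (illustrative; the label set and example below are made up):
#
#   example = InputExample(guid="dev-1", words=["Hugging", "Face"], labels=["B-ORG", "I-ORG"])
#   features = TokenClassificationTask.convert_examples_to_features(
#       [example], ["O", "B-ORG", "I-ORG"], max_seq_length=16, tokenizer=tokenizer
#   )
#   # each feature carries padded input_ids / attention_mask / label_ids of length 16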
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir, "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length))
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ["xlnet"]), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ["xlnet"] else 0, sep_token=tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == "left"), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ["xlnet"]), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ["xlnet"] else 0, sep_token=tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == "left"), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
| 713 |
from timeit import timeit


def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """
    Count the number of set bits in an integer using Brian Kernighan's way.
    """
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """
    Count the number of set bits in an integer using the modulo operator.
    """
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
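# Worked example (illustrative): 25 = 0b11001 has three set bits, so both
# counters return 3; Kernighan's loop only iterates once per set bit, since
# `number &= number - 1` clears the lowest set bit each pass.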
def benchmark() -> None:
    """
    Benchmark code comparing the two functions with different int values.
    Brian Kernighan's algorithm is consistently faster than the modulo version.
    """

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| 97 | 0 |
"""simple docstring"""
def A_ ( snake_case_ : int ):
'''simple docstring'''
UpperCamelCase : int = [1]
UpperCamelCase , UpperCamelCase , UpperCamelCase : Tuple = 0, 0, 0
UpperCamelCase : Tuple = ugly_nums[ia] * 2
UpperCamelCase : Dict = ugly_nums[ia] * 3
UpperCamelCase : int = ugly_nums[ia] * 5
for _ in range(1 ,snake_case_ ):
UpperCamelCase : Any = min(snake_case_ ,snake_case_ ,snake_case_ )
ugly_nums.append(snake_case_ )
if next_num == next_a:
ia += 1
UpperCamelCase : Any = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
UpperCamelCase : Optional[Any] = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
UpperCamelCase : Optional[int] = ugly_nums[ia] * 5
return ugly_nums[-1]
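# Example (illustrative): the sequence starts 1, 2, 3, 4, 5, 6, 8, 9, 10, 12,
# so ugly_numbers(10) == 12. Each candidate is the smallest unused multiple of
# 2, 3 or 5 of an earlier ugly number, tracked by the three indices above.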
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F'''{ugly_numbers(200) = }''')
| 499 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings, avoiding the
    whitespace/control characters the BPE code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
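# Note (illustrative): the non-printable bytes are remapped to 256 + n in order,
# so the space byte 32 maps to chr(288) == "Ġ", the familiar GPT-2/RoBERTa
# word-boundary marker seen in byte-level BPE vocabularies.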
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
| 499 | 1 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
UpperCAmelCase__ = "src/transformers"
UpperCAmelCase__ = "docs/source/en/tasks"
def _find_text_in_file(filename, start_prompt, end_prompt):
    """
    Find the text in `filename` between a line beginning with `start_prompt` and before `end_prompt`, removing empty
    lines.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
"asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
"summarization.md": ("nllb",),
"translation.md": ("nllb",),
}
def get_model_list_for_task(task_guide):
    """
    Return the list of models supporting a given task guide.
    """
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    """
    For a given task guide, checks the model list in the generated tip for consistency with the state of the lib and
    updates it if needed.
    """
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )

    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
UpperCAmelCase__ = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 709 |
def binary_xor(a: int, b: int) -> str:
    """
    Take in two integers, return a binary string of their bitwise XOR.
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
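# Example (illustrative): binary_xor(25, 32) -> "0b111001",
# since 011001 XOR 100000 = 111001 after zero-padding to a common width.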
if __name__ == "__main__":
import doctest
doctest.testmod()
| 639 | 0 |
from __future__ import annotations

from typing import TypedDict


class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    """
    Returns the list of rotations of the given string.
    """
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")

    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    """
    Returns the Burrows-Wheeler transform of the given string.
    """
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response
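# Worked example (illustrative): bwt_transform("banana") returns
# {"bwt_string": "nnbaaa", "idx_original_string": 3} — the sorted rotations are
# abanan, anaban, ananab, banana, nabana, nanaba, and their last letters spell "nnbaaa".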
def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """
    Reverses the Burrows-Wheeler transform and returns the original string.
    """
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int."
        )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than" " len(bwt_string)."
        )

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
| 10 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    PriorTransformer,
    StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components
        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"

        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]

        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 10 | 1 |
def actual_power(a: int, b: int):
    """
    Function using divide and conquer to calculate a^b.
    It only works for integer a, b.
    """
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    """
    Computes a^b for integer a, b; negative exponents yield a float.
    """
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
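# Examples (illustrative): power(2, 10) == 1024 and power(-2, -3) == -0.125.
# Negative b works because int(b / 2) truncates toward zero, so the recursion
# still reaches b == 0 before 1 / actual_power(a, b) takes the reciprocal.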
if __name__ == "__main__":
print(power(-2, -3)) | 300 |
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
'\\nname: ""\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: "Dataset Card for X" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: "Table of Contents"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Dataset Description"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: "Dataset Summary"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Supported Tasks and Leaderboards"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n'
)
CORRECT_DICT = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
README_CORRECT = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'

README_CORRECT_FOUR_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'

CORRECT_DICT_FOUR_LEVEL = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Extra Ignored Subsection',
'text': '',
'is_empty_text': True,
'subsections': [],
}
],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
README_EMPTY_YAML = '\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'

EXPECTED_ERROR_README_EMPTY_YAML = (
    'The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'
)

README_NO_YAML = '\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'

EXPECTED_ERROR_README_NO_YAML = (
    'The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'
)

README_INCORRECT_YAML = '\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'

EXPECTED_ERROR_README_INCORRECT_YAML = 'The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'

README_MISSING_TEXT = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'

EXPECTED_ERROR_README_MISSING_TEXT = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'

README_NONE_SUBSECTION = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n'

EXPECTED_ERROR_README_NONE_SUBSECTION = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'

README_MISSING_SUBSECTION = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n'

EXPECTED_ERROR_README_MISSING_SUBSECTION = 'The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'

README_MISSING_CONTENT = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n'

EXPECTED_ERROR_README_MISSING_CONTENT = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'

README_MISSING_FIRST_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'

EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'

README_MULTIPLE_WRONG_FIRST_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n'

EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = 'The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'

README_WRONG_FIRST_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'

EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = 'The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'

README_EMPTY = ''

EXPECTED_ERROR_README_EMPTY = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'

README_MULTIPLE_SAME_HEADING_1 = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'

EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = 'The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'
@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_readme_error(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : List[Any] = Path(_lowercase ) / '''README.md'''
with open(_lowercase , '''w+''' ) as readme_file:
readme_file.write(_lowercase )
UpperCAmelCase_ : List[str] = expected_error.format(path=_lowercase )
with pytest.raises(_lowercase , match=re.escape(_lowercase ) ):
ReadMe.from_readme(_lowercase , _lowercase )
@pytest.mark.parametrize(
'''readme_md,''' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : Dict = Path(_lowercase ) / '''README.md'''
with open(_lowercase , '''w+''' ) as readme_file:
readme_file.write(_lowercase )
ReadMe.from_readme(_lowercase , _lowercase , suppress_parsing_errors=_lowercase ) | 300 | 1 |
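# --- Added illustration (editor's assumption, not from the original test file) ---
# The README_MULTIPLE_SAME_HEADING_1 fixture used above is defined elsewhere in
# the module; a minimal README that would trigger the duplicate-heading error
# plausibly looks like this:
_EXAMPLE_README_MULTIPLE_SAME_HEADING = """\
---
language:
- zh
- en
---

# Dataset Card for My Dataset
# Dataset Card for My Dataset
"""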
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    "A callback that registers the events that goes through."

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")


@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events

    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
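# --- Added usage sketch (not part of the test module) ---
# Attaching a custom TrainerCallback to a real Trainer follows the same pattern
# the tests exercise; `my_model`, `my_args` and `my_dataset` are placeholders.
#
# class LossLoggerCallback(TrainerCallback):
#     def on_log(self, args, state, control, logs=None, **kwargs):
#         if logs is not None and "loss" in logs:
#             print(f"step {state.global_step}: loss={logs['loss']}")
#
# trainer = Trainer(my_model, my_args, train_dataset=my_dataset,
#                   callbacks=[LossLoggerCallback])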
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """
    This class stores the first and second signal and performs their circular
    convolution.
    """

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        """
        Performs the circular convolution of the first and second signals using
        a matrix method and returns it.

        >>> convolution = CircularConvolution()
        >>> convolution.circular_convolution()
        [10, 10, 6, 14]
        """
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)

        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        # fills the matrix with successive rotations of the second signal
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
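# --- Added cross-check (editor's sketch) ---
# The result for the default signals follows from the textbook definition
# c[n] = sum_m x[m] * h[(n - m) mod N]; for x = [2, 1, 2, -1] and
# h = [1, 2, 3, 4] this gives [10, 10, 6, 14].
def _demo_circular_convolution() -> None:
    convolution = CircularConvolution()
    assert convolution.circular_convolution() == [10, 10, 6, 14]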
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
a_ = logging.get_logger(__name__)
@dataclass
class UpperCAmelCase_ ( snake_case__ ):
UpperCAmelCase_ = [
"""no_inference""",
"""no_cuda""",
"""no_tpu""",
"""no_speed""",
"""no_memory""",
"""no_env_print""",
"""no_multi_process""",
]
def __init__( self , **lowercase_):
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
snake_case_ : Optional[Any] = deprecated_arg[3:]
setattr(self , lowercase_ , not kwargs.pop(lowercase_))
logger.warning(
F'{deprecated_arg} is depreciated. Please use --no_{positive_arg} or'
F' {positive_arg}={kwargs[positive_arg]}')
snake_case_ : List[str] = kwargs.pop("torchscript" , self.torchscript)
snake_case_ : List[str] = kwargs.pop("torch_xla_tpu_print_metrics" , self.torch_xla_tpu_print_metrics)
snake_case_ : Optional[int] = kwargs.pop("fp16_opt_level" , self.fpaa_opt_level)
super().__init__(**lowercase_)
UpperCAmelCase_ = field(default=snake_case__ , metadata={"""help""": """Trace the models using torchscript"""} )
UpperCAmelCase_ = field(default=snake_case__ , metadata={"""help""": """Print Xla/PyTorch tpu metrics"""} )
UpperCAmelCase_ = field(
default="""O1""" , metadata={
"""help""": (
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. """
"""See details at https://nvidia.github.io/apex/amp.html"""
)
} , )
@cached_property
def snake_case__ ( self):
requires_backends(self , ["torch"])
logger.info("PyTorch: setting up devices")
if not self.cuda:
snake_case_ : int = torch.device("cpu")
snake_case_ : Union[str, Any] = 0
elif is_torch_tpu_available():
snake_case_ : List[str] = xm.xla_device()
snake_case_ : Tuple = 0
else:
snake_case_ : Tuple = torch.device("cuda" if torch.cuda.is_available() else "cpu")
snake_case_ : Optional[int] = torch.cuda.device_count()
return device, n_gpu
@property
def snake_case__ ( self):
return is_torch_tpu_available() and self.tpu
@property
def snake_case__ ( self):
requires_backends(self , ["torch"])
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def snake_case__ ( self):
requires_backends(self , ["torch"])
return self._setup_devices[0]
@property
def snake_case__ ( self):
requires_backends(self , ["torch"])
return self._setup_devices[1]
@property
def snake_case__ ( self):
return self.n_gpu > 0
'''simple docstring'''
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """
    Given the numerical coefficients a, b and c, calculates the roots of any
    quadratic equation of the form ax^2 + bx + c.

    >>> quadratic_roots(a=1, b=3, c=-4)
    (1.0, -4.0)
    """
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution1, solution2 = quadratic_roots(a=5, b=6, c=1)
    print(f'The solutions are: {solution1} and {solution2}')
if __name__ == "__main__":
main()
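# --- Added example (editor's sketch) ---
# cmath.sqrt keeps the computation valid for a negative discriminant, so
# complex conjugate roots come out directly, e.g. for x^2 + x + 1 = 0:
def _demo_complex_roots() -> None:
    root_1, root_2 = quadratic_roots(a=1, b=1, c=1)
    # root_1 == (-0.5+0.8660254037844386j) and root_2 is its conjugate
    print(root_1, root_2)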
'''simple docstring'''
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result():
    """simple docstring"""
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    result = kruskal(num_nodes, edges)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    assert sorted(expected) == sorted(result)
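# --- Added sanity check (editor's sketch) ---
# A spanning tree of a connected 9-node graph has exactly 8 edges; the expected
# MST above also has total weight 1 + 2 + 2 + 4 + 4 + 7 + 8 + 9 = 37.
def test_expected_mst_properties():
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    assert len(expected) == 9 - 1
    assert sum(weight for _, _, weight in expected) == 37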
'''simple docstring'''
def multiplication_table(number: int, number_of_terms: int) -> str:
    """Return the multiplication table of `number` up to `number_of_terms` terms, one per line."""
    return "\n".join(
        f"""{number} * {i} = {number * i}""" for i in range(1, number_of_terms + 1)
    )


if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=10))
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self) -> None:
        model = TFXLMRobertaModel.from_pretrained('jplu/tf-xlm-roberta-base')

        features = {
            'input_ids': tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            'attention_mask': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)['last_hidden_state']
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0_6_8_1_7_6_2, 0.1_0_8_9_4_4_5_1, 0.0_6_7_7_2_5_0_4],
                    [-0.0_6_4_2_3_6_6_8, 0.0_2_3_6_6_6_1_5, 0.0_4_3_2_9_3_4_4],
                    [-0.0_6_0_5_7_2_9_5, 0.0_9_9_7_4_1_3_5, -0.0_0_0_7_0_5_8_4],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
"""simple docstring"""
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for the given input sample x[n]."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the frequency response of a filter."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel('Frequency (Hz)')
    plt.xscale('log')

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel('Gain (dB)')

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the phase response of a filter."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phases = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel('Frequency (Hz)')
    plt.xscale('log')

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel('Phase shift (Radians)')
    plt.plot(np.unwrap(phases, -2 * pi))
    plt.show()
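# --- Added example (editor's sketch) ---
# Any object with a `process(sample) -> float` method satisfies the FilterType
# protocol; a pass-through filter is the simplest case and plots a flat 0 dB
# frequency response.
class IdentityFilter:
    def process(self, sample: float) -> float:
        return sample


def _demo_show_response(samplerate: int = 48000) -> None:
    show_frequency_response(IdentityFilter(), samplerate)
    show_phase_response(IdentityFilter(), samplerate)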
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)

        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            successors = self.get_successors(current_node)

            for node in successors:
                self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node
                )

            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE = (0, 0)
SCREAMING_SNAKE_CASE = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
SCREAMING_SNAKE_CASE = time.time()
SCREAMING_SNAKE_CASE = BreadthFirstSearch(init, goal)
SCREAMING_SNAKE_CASE = bfs.search()
SCREAMING_SNAKE_CASE = time.time() - start_bfs_time
print('Unidirectional BFS computation time : ', bfs_time)
SCREAMING_SNAKE_CASE = time.time()
SCREAMING_SNAKE_CASE = BidirectionalBreadthFirstSearch(init, goal)
SCREAMING_SNAKE_CASE = bd_bfs.search()
SCREAMING_SNAKE_CASE = time.time() - start_bd_bfs_time
print('Bidirectional BFS computation time : ', bd_bfs_time)
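# --- Added sanity check (editor's sketch) ---
# Consecutive coordinates in a returned path should differ by exactly one move
# from `delta` (Manhattan distance 1).
def is_contiguous_path(path: Path) -> bool:
    return all(
        abs(y2 - y1) + abs(x2 - x1) == 1
        for (y1, x1), (y2, x2) in zip(path, path[1:])
    )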
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int) -> None:
    '''Write the first `n` lines of each file `f` in `src_dir` to `dest_dir/f`.'''
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open('''w''').write('''\n'''.join(new))


if __name__ == "__main__":
    fire.Fire(minify)
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
_DESCRIPTION = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
_KWARGS_DESCRIPTION = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n    predictions: list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references: list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    max_order: Maximum n-gram order to use when computing BLEU score.\n    smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n    \'bleu\': bleu score,\n    \'precisions\': geometric mean of n-gram precisions,\n    \'brevity_penalty\': brevity penalty,\n    \'length_ratio\': ratio of lengths,\n    \'translation_length\': translation_length,\n    \'reference_length\': reference_length\nExamples:\n\n    >>> predictions = [\n    ...     ["hello", "there", "general", "kenobi"],  # tokenized prediction of the first sample\n    ...     ["foo", "bar", "foobar"]  # tokenized prediction of the second sample\n    ... ]\n    >>> references = [\n    ...     [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]],  # tokenized references for the first sample (2 references)\n    ...     [["foo", "bar", "foobar"]]  # tokenized references for the second sample (1 reference)\n    ... ]\n    >>> bleu = datasets.load_metric("bleu")\n    >>> results = bleu.compute(predictions=predictions, references=references)\n    >>> print(results["bleu"])\n    1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
    def _info(self):
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('string', id='token'), id='sequence'),
                    'references': datasets.Sequence(
                        datasets.Sequence(datasets.Value('string', id='token'), id='sequence'), id='references'
                    ),
                }
            ),
            codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'],
            reference_urls=[
                'https://en.wikipedia.org/wiki/BLEU',
                'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        '''simple docstring'''
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
'''simple docstring'''
from ..utils import DummyObject, requires_backends
# NOTE: the class name below is reconstructed with moderate confidence; in
# diffusers, the torch+scipy dummy-objects module defines LMSDiscreteScheduler.
class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
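# --- Added behavioral note (editor's sketch, not from the source) ---
# A DummyObject stand-in lets the package import succeed without the optional
# backends; instantiating it (or calling its constructors) raises via
# requires_backends, e.g.:
#
#     scheduler = LMSDiscreteScheduler()  # ImportError if torch/scipy missing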
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    """
    An implementation of the Monte Carlo method used to find pi:
    sample uniformly from the square [-1, 1] x [-1, 1] and count the fraction
    of points landing inside the unit circle; that fraction approaches pi / 4.
    """

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f'The estimated value of pi is {pi_estimate}')
    print(f'The numpy value of pi is {pi}')
    print(f'The total error is {abs(pi - pi_estimate)}')


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """
    Monte Carlo estimate of the integral of `function_to_integrate` over
    [min_value, max_value]: the mean function value at uniform samples times
    the interval length.
    """
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    """Check the estimator against the exact area under y = x."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f'Estimating area under y=x where x varies from {min_value} to {max_value}')
    print(f'Estimated value is {estimated_value}')
    print(f'Expected value is {expected_value}')
    print(f'Total error is {abs(estimated_value - expected_value)}')
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi as the area under the quarter circle y = sqrt(4 - x^2) on [0, 2]."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f'Estimated value is {estimated_value}')
    print(f'Expected value is {pi}')
    print(f'Total error is {abs(estimated_value - pi)}')
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
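# --- Added example (editor's sketch) ---
# The same estimator applied to f(x) = x**2 on [0, 1]; the exact integral is
# 1/3, and the Monte Carlo error shrinks roughly like 1/sqrt(iterations).
def demo_quadratic_integral(iterations: int = 100_000) -> None:
    estimate = area_under_curve_estimator(iterations, lambda x: x * x, 0.0, 1.0)
    print(f"Estimate of the integral of x^2 over [0, 1]: {estimate} (exact: 1/3)")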
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
                ' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
                ' results in services or applications open to the public. Both the diffusers team and Hugging Face'
                ' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
                ' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
                ' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .'
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate: int = 16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # transcribe the audio into the text prompt
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors='pt', sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(prompt)}""")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                f""" {type(callback_steps)}."""
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding='max_length',
            max_length=self.tokenizer.model_max_length,
            return_tensors='pt',
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                f""" {self.tokenizer.model_max_length} tokens: {removed_text}"""
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [''] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"""`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="""
                    f""" {type(prompt)}."""
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"""`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"""
                    f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
                    ' the batch size of `prompt`.'
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding='max_length',
                max_length=max_length,
                truncation=True,
                return_tensors='pt',
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device='cpu', dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs['eta'] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
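# --- Added usage sketch (editor's assumption; mirrors how diffusers community
# pipelines are typically assembled, not taken verbatim from the source) ---
#
# from transformers import WhisperForConditionalGeneration, WhisperProcessor
# from diffusers import StableDiffusionPipeline
#
# sd = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
# pipe = SpeechToImagePipeline(
#     speech_model=WhisperForConditionalGeneration.from_pretrained("openai/whisper-small"),
#     speech_processor=WhisperProcessor.from_pretrained("openai/whisper-small"),
#     vae=sd.vae, text_encoder=sd.text_encoder, tokenizer=sd.tokenizer,
#     unet=sd.unet, scheduler=sd.scheduler,
#     safety_checker=sd.safety_checker, feature_extractor=sd.feature_extractor,
# )
# image = pipe(audio_array, sampling_rate=16_000).images[0]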
'''simple docstring'''
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2


class Dictionary:
    """A mapping from symbols to consecutive integers"""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Loads the dictionary from a text file"""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary"""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        """
        Loads a pre-existing dictionary from a text file and adds its symbols to this instance.
        """
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    """simple docstring"""
    # (1) strip the BPE word-breaking suffix, (2) add a word-ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f'''{k}</w>''']
        d2[k] = d[k]  # restore
    return d2
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    """simple docstring"""
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f'''path {biogpt_checkpoint_path} does not exist!''')
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f'''Writing results to {pytorch_dump_folder_path}''')

    # handle various types of models

    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f'''path to the file {checkpoint_file} does not exist!''')
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f'''path to the file {dict_file} does not exist!''')
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f'''Generating {src_vocab_file} of {src_vocab_size} records''')
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f'''path to the file {bpecodes_file} does not exist!''')

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.0_2,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with

    print(f'''Generating {biogpt_model_config_file}''')
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f'''Generating {biogpt_tokenizer_config_file}''')
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict[layer_name.replace("decoder.", "")] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f'''Generating {pytorch_weights_dump_path}''')
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
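# --- Added usage sketch (hypothetical paths, editor's illustration) ---
# python convert_biogpt_original_pytorch_checkpoint_to_pytorch.py \
#     --biogpt_checkpoint_path /path/to/fairseq_biogpt_dump \
#     --pytorch_dump_folder_path ./biogpt-converted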
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
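# --- Added example (editor's sketch; the BERT configs are illustrative) ---
# Composing two default BERT configs shows the classmethod toggling the decoder
# flags before serializing both sub-configs into one composite config.
def _demo_encoder_decoder_config():
    from transformers import BertConfig

    config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
    assert config.decoder.is_decoder and config.decoder.add_cross_attention
    return config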
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """
    Return the normalized number of iterations it takes the point (x, y) to
    escape the Mandelbrot iteration, in [0, 1]; 1 means it never diverged.
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    """Black-and-white coloring: black inside the set, white outside."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """Color coding: black inside the set, hue keyed to the escape distance outside."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new('''RGB''', (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
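# --- Added sanity check (editor's sketch) ---
# Points inside the set never diverge, so get_distance returns 1.0; far-away
# points escape at step 0 and return 0.0.
def _demo_get_distance() -> None:
    assert get_distance(0.0, 0.0, max_step=50) == 1.0  # origin is in the set
    assert get_distance(2.0, 2.0, max_step=50) == 0.0  # diverges immediately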
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6,
        metadata={"help": "Percentage of patches to mask."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going to pre-train."""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None,
        metadata={"help": "Stride to use for the encoder."},
    )
class MaskGenerator:
    """Generates a random boolean mask over the model's patch grid for masked image modeling."""

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())
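
# A minimal sketch (kept in comments, with numbers derived from the defaults
# above) of what MaskGenerator produces for a 192x192 input:
#
#   mask_generator = MaskGenerator(input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6)
#   mask = mask_generator()
#   mask.shape  # torch.Size([2304]) -> (192 // 4) ** 2 model-patch positions
#   mask.sum()  # 1408 = 22 masked 32px patches * (32 // 4) ** 2, i.e. roughly 60% of the image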
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"

    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )

    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        }
    )
    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
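
# A minimal sketch (hypothetical arguments) of how this script is typically launched;
# the flag names map onto the ModelArguments/DataTrainingArguments/TrainingArguments fields above:
#
#   python run_mim.py \
#       --model_type vit \
#       --dataset_name cifar10 \
#       --output_dir ./simmim-outputs \
#       --do_train \
#       --do_eval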
if __name__ == "__main__":
main() | 526 | 0 |
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available

from ..wav2vec2.test_feature_extraction_wav2vec2 import floats_list


if is_pyctcdecode_available():
    from huggingface_hub import snapshot_download
    from pyctcdecode import BeamSearchDecoderCTC

    from transformers.models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
    from transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm import Wav2Vec2DecoderWithLMOutput

if is_torch_available():
    from transformers import Wav2Vec2ForCTC
@require_pyctcdecode
class Wav2Vec2ProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16_000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"

    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return Wav2Vec2CTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return Wav2Vec2FeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        processor.save_pretrained(self.tmpdirname)
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(self.tmpdirname)

        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, Wav2Vec2CTCTokenizer)

        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, Wav2Vec2FeatureExtractor)

        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set,
            decoder.model_container[decoder._model_key]._unigram_set,
        )
        self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC)

    def test_save_load_pretrained_additional_features(self):
        processor = Wav2Vec2ProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
        )
        processor.save_pretrained(self.tmpdirname)

        # make sure that error is thrown when decoder alphabet doesn't match
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3
        )

        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0)
        self.assertEqual(processor.language_model.beta, 3.0)
        self.assertEqual(processor.language_model.score_boundary, -7.0)
        self.assertEqual(processor.language_model.unk_score_offset, 3)

    def test_load_decoder_tokenizer_mismatch_content(self):
        tokenizer = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(["xx"])
        with self.assertRaisesRegex(ValueError, "include"):
            Wav2Vec2ProcessorWithLM(
                tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
            )
    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        raw_speech = floats_list((3, 1_000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        input_str = "This is a test string"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
    def test_decoder(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits(shape=(10, 16), seed=13)

        decoded_processor = processor.decode(logits)

        decoded_decoder = decoder.decode_beams(logits)[0]

        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual("</s> <s> </s>", decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)

    @parameterized.expand([[None], ["fork"], ["spawn"]])
    def test_decoder_batch(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)

        logits_list = list(logits)

        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)

        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])

        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)
    def test_decoder_with_params(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        beam_width = 15
        beam_prune_logp = -20.0
        token_min_logp = -4.0

        decoded_processor_out = processor.batch_decode(
            logits,
            beam_width=beam_width,
            beam_prune_logp=beam_prune_logp,
            token_min_logp=token_min_logp,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
                beam_width=beam_width,
                beam_prune_logp=beam_prune_logp,
                token_min_logp=token_min_logp,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]
        logit_scores = [d[0][2] for d in decoded_decoder_out]
        lm_scores = [d[0][3] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"], decoded_processor)

        self.assertTrue(np.array_equal(logit_scores, decoded_processor_out.logit_score))
        self.assertTrue(np.allclose([-20.054, -18.447], logit_scores, atol=1e-3))

        self.assertTrue(np.array_equal(lm_scores, decoded_processor_out.lm_score))
        self.assertTrue(np.allclose([-15.554, -13.9474], lm_scores, atol=1e-3))

    def test_decoder_with_params_of_lm(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        alpha = 2.0
        beta = 5.0
        unk_score_offset = -20.0
        lm_score_boundary = True

        decoded_processor_out = processor.batch_decode(
            logits,
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)
        decoder.reset_params(
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"], decoded_processor)

        lm_model = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha, 2.0)
        self.assertEqual(lm_model.beta, 5.0)
        self.assertEqual(lm_model.unk_score_offset, -20.0)
        self.assertEqual(lm_model.score_boundary, True)
    def test_decoder_download_ignores_files(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        downloaded_decoder_files = os.listdir(path_to_cached_dir)
        expected_decoder_files = ["alphabet.json", "language_model"]

        downloaded_decoder_files.sort()
        expected_decoder_files.sort()

        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(downloaded_decoder_files, expected_decoder_files)

    def test_decoder_local_files(self):
        local_dir = snapshot_download("hf-internal-testing/processor_with_lm")
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(local_dir)

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        local_decoder_files = os.listdir(local_dir)
        expected_decoder_files = os.listdir(path_to_cached_dir)

        local_decoder_files.sort()
        expected_decoder_files.sort()

        # test that both decoder form hub and local files in cache are the same
        self.assertListEqual(local_decoder_files, expected_decoder_files)

    def test_processor_from_auto_processor(self):
        processor_wav2vec2 = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        processor_auto = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm")

        raw_speech = floats_list((3, 1_000))

        input_wav2vec2 = processor_wav2vec2(raw_speech, return_tensors="np")
        input_auto = processor_auto(raw_speech, return_tensors="np")

        for key in input_wav2vec2.keys():
            self.assertAlmostEqual(input_wav2vec2[key].sum(), input_auto[key].sum(), delta=1e-2)

        logits = self._get_dummy_logits()

        decoded_wav2vec2 = processor_wav2vec2.batch_decode(logits)
        decoded_auto = processor_auto.batch_decode(logits)

        self.assertListEqual(decoded_wav2vec2.text, decoded_auto.text)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        self.assertListEqual(
            processor.model_input_names,
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list

    def test_offsets_integration_fast(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()[0]
        outputs = processor.decode(logits, output_word_offsets=True)
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"], "word")), outputs.text)
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "end_offset"), [1, 3, 5])

    def test_offsets_integration_fast_batch(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()
        outputs = processor.batch_decode(logits, output_word_offsets=True)

        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertListEqual(
            [" ".join(self.get_from_offsets(o, "word")) for o in outputs["word_offsets"]], outputs.text
        )
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "end_offset"), [1, 3, 5])
    @slow
    @require_torch
    @require_torchaudio
    def test_word_time_stamp_integration(self):
        import torch

        ds = load_dataset("common_voice", "en", split="train", streaming=True)
        ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16_000))
        ds_iter = iter(ds)
        sample = next(ds_iter)

        processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values

        with torch.no_grad():
            logits = model(input_values).logits.cpu().numpy()

        output = processor.decode(logits[0], output_word_offsets=True)

        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_time_stamps = [
            {
                "start_time": d["start_offset"] * time_offset,
                "end_time": d["end_offset"] * time_offset,
                "word": d["word"],
            }
            for d in output["word_offsets"]
        ]

        EXPECTED_TEXT = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"

        # output words
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), EXPECTED_TEXT)
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text)

        # output times
        start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time"))
        end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time"))

        # fmt: off
        expected_start_tensor = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
        expected_end_tensor = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
        # fmt: on

        self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01))
        self.assertTrue(torch.allclose(end_times, expected_end_tensor, atol=0.01))
| 705 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2574) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
| 151 | 0 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[float]`): Predicted labels, as returned by a model.\n    references (`List[float]`): Ground truth labels.\n    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n        only the spearmanr score. Defaults to `False`.\nReturns:\n    spearmanr (`float`): Spearman correlation coefficient.\n    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n    Example 1:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n        >>> print(results)\n        {\'spearmanr\': -0.7}\n\n    Example 2:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n        ...                                    predictions=[10, 9, 2.5, 6, 4],\n        ...                                    return_pvalue=True)\n        >>> print(results[\'spearmanr\'])\n        -0.7\n        >>> print(round(results[\'spearmanr_pvalue\'], 2))\n        0.19\n'
_CITATION = R'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]} | 44 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as MobileViT does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.93_64, -1.23_27, -0.46_53]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.97_13, 6.97_86, 7.24_22], [7.28_93, 7.28_25, 7.44_46], [7.65_80, 7.87_97, 7.94_20]],
                [[-10.68_69, -10.32_50, -10.34_71], [-10.42_28, -9.98_68, -9.71_32], [-11.04_05, -11.02_21, -10.73_18]],
                [[-3.30_89, -2.85_39, -2.67_40], [-3.27_06, -2.56_21, -2.51_08], [-3.25_34, -2.66_15, -2.66_51]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 28 | 0 |
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    def __init__(self):
        self.data = []
        self.head = 0
        self.tail = 0

    def is_empty(self):
        return self.head == self.tail

    def push(self, data):
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self):
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self):
        return self.tail - self.head

    def print_queue(self):
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])
class MyNode:
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None
        self.height = 1

    def get_data(self):
        return self.data

    def get_left(self):
        return self.left

    def get_right(self):
        return self.right

    def get_height(self):
        return self.height

    def set_data(self, data):
        self.data = data

    def set_left(self, node):
        self.left = node

    def set_right(self, node):
        self.right = node

    def set_height(self, height):
        self.height = height
def a (lowerCAmelCase__ ):
if node is None:
return 0
return node.get_height()
def a (lowerCAmelCase__ , lowerCAmelCase__ ):
if a > b:
return a
return b
def a (lowerCAmelCase__ ):
print("""left rotation node:""" , node.get_data() )
__a = node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(lowerCAmelCase__ )
__a = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowerCAmelCase__ )
__a = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(lowerCAmelCase__ )
return ret
def a (lowerCAmelCase__ ):
print("""right rotation node:""" , node.get_data() )
__a = node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(lowerCAmelCase__ )
__a = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowerCAmelCase__ )
__a = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(lowerCAmelCase__ )
return ret
def a (lowerCAmelCase__ ):
__a = node.get_left()
assert left_child is not None
node.set_left(left_rotation(lowerCAmelCase__ ) )
return right_rotation(lowerCAmelCase__ )
def a (lowerCAmelCase__ ):
__a = node.get_right()
assert right_child is not None
node.set_right(right_rotation(lowerCAmelCase__ ) )
return left_rotation(lowerCAmelCase__ )
def a (lowerCAmelCase__ , lowerCAmelCase__ ):
if node is None:
return MyNode(lowerCAmelCase__ )
if data < node.get_data():
node.set_left(insert_node(node.get_left() , lowerCAmelCase__ ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
): # an unbalance detected
__a = node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
__a = right_rotation(lowerCAmelCase__ )
else:
__a = lr_rotation(lowerCAmelCase__ )
else:
node.set_right(insert_node(node.get_right() , lowerCAmelCase__ ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
__a = node.get_right()
assert right_child is not None
if data < right_child.get_data():
__a = rl_rotation(lowerCAmelCase__ )
else:
__a = left_rotation(lowerCAmelCase__ )
__a = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowerCAmelCase__ )
return node
def a (lowerCAmelCase__ ):
while True:
__a = root.get_right()
if right_child is None:
break
__a = right_child
return root.get_data()
def a (lowerCAmelCase__ ):
while True:
__a = root.get_left()
if left_child is None:
break
__a = left_child
return root.get_data()
def a (lowerCAmelCase__ , lowerCAmelCase__ ):
__a = root.get_left()
__a = root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
__a = get_left_most(lowerCAmelCase__ )
root.set_data(lowerCAmelCase__ )
root.set_right(del_node(lowerCAmelCase__ , lowerCAmelCase__ ) )
elif left_child is not None:
__a = left_child
elif right_child is not None:
__a = right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print("""No such data""" )
return root
else:
root.set_left(del_node(lowerCAmelCase__ , lowerCAmelCase__ ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(lowerCAmelCase__ , lowerCAmelCase__ ) )
if get_height(lowerCAmelCase__ ) - get_height(lowerCAmelCase__ ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
__a = left_rotation(lowerCAmelCase__ )
else:
__a = rl_rotation(lowerCAmelCase__ )
elif get_height(lowerCAmelCase__ ) - get_height(lowerCAmelCase__ ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
__a = right_rotation(lowerCAmelCase__ )
else:
__a = lr_rotation(lowerCAmelCase__ )
__a = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
root.set_height(lowerCAmelCase__ )
return root
class __UpperCAmelCase :
"""simple docstring"""
def __init__( self ):
__a = None
def snake_case_ ( self ):
return get_height(self.root )
def snake_case_ ( self , __A ):
print("""insert:""" + str(__A ) )
__a = insert_node(self.root , __A )
def snake_case_ ( self , __A ):
print("""delete:""" + str(__A ) )
if self.root is None:
print("""Tree is empty!""" )
return
__a = del_node(self.root , __A )
def __str__( self , ): # a level traversal gives a more intuitive look at the tree
__a = """"""
__a = MyQueue()
q.push(self.root )
__a = self.get_height()
if layer == 0:
return output
__a = 0
while not q.is_empty():
__a = q.pop()
__a = """ """ * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(None )
q.push(None )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
__a = cnt + 1
for i in range(100 ):
if cnt == math.pow(2 , layer ) - 1:
__a = layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
SCREAMING_SNAKE_CASE = AVLtree()
SCREAMING_SNAKE_CASE = list(range(1_0))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
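# Hedged sketch (appended, not in the original): the balance rule the insert and
# delete paths above enforce — a node is rebalanced when its two subtree heights
# differ by 2, with single vs. double rotation chosen by which side of the heavy
# child is taller. Standalone toy predicate with illustrative names.
def needs_rebalance(left_height: int, right_height: int) -> bool:
    return abs(left_height - right_height) == 2

assert needs_rebalance(3, 1) and not needs_rebalance(2, 1)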
| 209 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE = {
'facebook/mask2former-swin-small-coco-instance': (
'https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __UpperCAmelCase ( __A ):
"""simple docstring"""
_lowerCamelCase = """mask2former"""
_lowerCamelCase = ["""swin"""]
_lowerCamelCase = {"""hidden_size""": """hidden_dim"""}
def __init__( self , __A = None , __A = 256 , __A = 256 , __A = 256 , __A = 1024 , __A = "relu" , __A = 6 , __A = 10 , __A = 8 , __A = 0.0 , __A = 2048 , __A = False , __A = False , __A = 4 , __A = 255 , __A = 100 , __A = 0.1 , __A = 2.0 , __A = 5.0 , __A = 5.0 , __A = 12544 , __A = 3.0 , __A = 0.75 , __A = 0.02 , __A = 1.0 , __A = True , __A = [4, 8, 16, 32] , __A = None , **__A , ):
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.""" )
__a = CONFIG_MAPPING["""swin"""](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=__A , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
if isinstance(__A , __A ):
__a = backbone_config.pop("""model_type""" )
__a = CONFIG_MAPPING[backbone_model_type]
__a = config_class.from_dict(__A )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '''
f'''Supported model types: {','.join(self.backbones_supported )}''' )
__a = backbone_config
__a = feature_size
__a = mask_feature_size
__a = hidden_dim
__a = encoder_feedforward_dim
__a = activation_function
__a = encoder_layers
__a = decoder_layers
__a = num_attention_heads
__a = dropout
__a = dim_feedforward
__a = pre_norm
__a = enforce_input_projection
__a = common_stride
__a = ignore_value
__a = num_queries
__a = no_object_weight
__a = class_weight
__a = mask_weight
__a = dice_weight
__a = train_num_points
__a = oversample_ratio
__a = importance_sample_ratio
__a = init_std
__a = init_xavier_std
__a = use_auxiliary_loss
__a = feature_strides
__a = output_auxiliary_logits
__a = decoder_layers
super().__init__(**__A )
@classmethod
def snake_case_ ( cls , __A , **__A ):
return cls(
backbone_config=__A , **__A , )
def snake_case_ ( self ):
__a = copy.deepcopy(self.__dict__ )
__a = self.backbone_config.to_dict()
__a = self.__class__.model_type
return output
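# Hedged usage sketch (appended, not in the original file): a composite config
# like the one above is normally built from a backbone config via the
# classmethod defined above (publicly named `from_backbone_config`), e.g.
#   from transformers import SwinConfig, Mask2FormerConfig
#   backbone = SwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
#   config = Mask2FormerConfig.from_backbone_config(backbone)
# Class names come from the public transformers API; the usage is illustrative.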
| 209 | 1 |
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
lowercase__ = 42 # [batch_size x 3]
lowercase__ = 42 # [batch_size x 3]
lowercase__ = 42 # [batch_size x 3]
lowercase__ = 42 # [batch_size x 3]
lowercase__ = 42
lowercase__ = 42
lowercase__ = 42
lowercase__ = 42
lowercase__ = 42
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def lowercase__ ( self : Dict ):
'''simple docstring'''
return torch.from_numpy(np.array([self.width, self.height], dtype=np.floataa ) )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.floataa ) )
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = torch.arange(self.height * self.width )
lowercase__ = torch.stack(
[
pixel_indices % self.width,
torch.div(lowerCamelCase, self.width, rounding_mode='''trunc''' ),
], axis=1, )
return coords
@property
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ , *lowercase__ = self.shape
lowercase__ = int(np.prod(lowerCamelCase ) )
lowercase__ = self.get_image_coords()
lowercase__ = torch.broadcast_to(coords.unsqueeze(0 ), [batch_size * inner_batch_size, *coords.shape] )
lowercase__ = self.get_camera_rays(lowerCamelCase )
lowercase__ = rays.view(lowerCamelCase, inner_batch_size * self.height * self.width, 2, 3 )
return rays
def lowercase__ ( self : Union[str, Any], lowerCamelCase : torch.Tensor ):
'''simple docstring'''
lowercase__ , *lowercase__ , lowercase__ = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
lowercase__ = coords.view(lowerCamelCase, -1, 2 )
lowercase__ = self.resolution()
lowercase__ = self.fov()
lowercase__ = (flat.float() / (res - 1)) * 2 - 1
lowercase__ = fracs * torch.tan(fov / 2 )
lowercase__ = fracs.view(lowerCamelCase, -1, 2 )
lowercase__ = (
self.z.view(lowerCamelCase, 1, 3 )
+ self.x.view(lowerCamelCase, 1, 3 ) * fracs[:, :, :1]
+ self.y.view(lowerCamelCase, 1, 3 ) * fracs[:, :, 1:]
)
lowercase__ = directions / directions.norm(dim=-1, keepdim=lowerCamelCase )
lowercase__ = torch.stack(
[
torch.broadcast_to(self.origin.view(lowerCamelCase, 1, 3 ), [batch_size, directions.shape[1], 3] ),
directions,
], dim=2, )
return rays.view(lowerCamelCase, *lowerCamelCase, 2, 3 )
def lowercase__ ( self : Optional[int], lowerCamelCase : int, lowerCamelCase : int ):
'''simple docstring'''
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin, x=self.x, y=self.y, z=self.z, width=lowerCamelCase, height=lowerCamelCase, x_fov=self.x_fov, y_fov=self.y_fov, )
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
lowercase__ = []
lowercase__ = []
lowercase__ = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
lowercase__ = np.array([np.sin(lowerCamelCase_ ), np.cos(lowerCamelCase_ ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
lowercase__ = -z * 4
lowercase__ = np.array([np.cos(lowerCamelCase_ ), -np.sin(lowerCamelCase_ ), 0.0] )
lowercase__ = np.cross(lowerCamelCase_ , lowerCamelCase_ )
origins.append(lowerCamelCase_ )
xs.append(lowerCamelCase_ )
ys.append(lowerCamelCase_ )
zs.append(lowerCamelCase_ )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(lowerCamelCase_ , axis=0 ) ).float() , x=torch.from_numpy(np.stack(lowerCamelCase_ , axis=0 ) ).float() , y=torch.from_numpy(np.stack(lowerCamelCase_ , axis=0 ) ).float() , z=torch.from_numpy(np.stack(lowerCamelCase_ , axis=0 ) ).float() , width=lowerCamelCase_ , height=lowerCamelCase_ , x_fov=0.7 , y_fov=0.7 , shape=(1, len(lowerCamelCase_ )) , )
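# Hedged sketch (appended, illustrative names): the core of the camera-ray math
# above — scale pixel fractions by tan(fov / 2), form a direction in the camera
# frame from the x/y/z axes, and normalize it to unit length.
import torch

z_axis = torch.tensor([0.0, 0.0, 1.0])
x_axis = torch.tensor([1.0, 0.0, 0.0])
y_axis = torch.tensor([0.0, 1.0, 0.0])
frac = torch.tensor([0.25, -0.5]) * torch.tan(torch.tensor(0.7) / 2)
direction = z_axis + x_axis * frac[0] + y_axis * frac[1]
direction = direction / direction.norm()
assert torch.isclose(direction.norm(), torch.tensor(1.0))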
| 183 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
A__ : Tuple = {
'<': operator.lt,
'<=': operator.le,
'==': operator.eq,
'!=': operator.ne,
'>=': operator.ge,
'>': operator.gt,
}
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
if got_ver is None or want_ver is None:
raise ValueError(
F"""Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"""
F""" reinstalling {pkg}.""" )
if not ops[op](version.parse(lowerCamelCase_ ) , version.parse(lowerCamelCase_ ) ):
raise ImportError(
F"""{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}""" )
def a ( lowerCamelCase_ , lowerCamelCase_ = None ):
'''simple docstring'''
lowercase__ = F"""\n{hint}""" if hint is not None else ''''''
# non-versioned check
if re.match(r'''^[\w_\-\d]+$''' , lowerCamelCase_ ):
lowercase__ , lowercase__ , lowercase__ = requirement, None, None
else:
lowercase__ = re.findall(r'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''' , lowerCamelCase_ )
if not match:
raise ValueError(
'''requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but'''
F""" got {requirement}""" )
lowercase__ , lowercase__ = match[0]
lowercase__ = want_full.split(''',''' ) # there could be multiple requirements
lowercase__ = {}
for w in want_range:
lowercase__ = re.findall(r'''^([\s!=<>]{1,2})(.+)''' , lowerCamelCase_ )
if not match:
raise ValueError(
'''requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,'''
F""" but got {requirement}""" )
lowercase__ , lowercase__ = match[0]
lowercase__ = want_ver
if op not in ops:
raise ValueError(F"""{requirement}: need one of {list(ops.keys() )}, but got {op}""" )
# special case
if pkg == "python":
lowercase__ = '''.'''.join([str(lowerCamelCase_ ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
return
# check if any version is installed
try:
lowercase__ = importlib.metadata.version(lowerCamelCase_ )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
F"""The '{requirement}' distribution was not found and is required by this application. {hint}""" )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = '''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'''
return require_version(lowerCamelCase_ , lowerCamelCase_ )
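# Hedged sketch (appended): the comparison at the heart of the version check
# above, shown directly with `packaging`; version strings are illustrative.
from packaging import version
import operator

demo_ops = {">=": operator.ge, "==": operator.eq, "<": operator.lt}
assert demo_ops[">="](version.parse("2.1.0"), version.parse("1.9"))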
| 183 | 1 |
'''simple docstring'''
from __future__ import annotations
__lowerCamelCase : List[str] = []
def __snake_case (__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
for i in range(len(__UpperCAmelCase ) ):
if board[row][i] == 1:
return False
for i in range(len(__UpperCAmelCase ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(__UpperCAmelCase , -1 , -1 ) , range(__UpperCAmelCase , -1 , -1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(__UpperCAmelCase , -1 , -1 ) , range(__UpperCAmelCase , len(__UpperCAmelCase ) ) ):
if board[i][j] == 1:
return False
return True
def __snake_case (__UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
if row >= len(__UpperCAmelCase ):
solution.append(__UpperCAmelCase )
printboard(__UpperCAmelCase )
print()
return True
for i in range(len(__UpperCAmelCase ) ):
if is_safe(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCamelCase_ : Any = 1
solve(__UpperCAmelCase , row + 1 )
lowerCamelCase_ : Optional[Any] = 0
return False
def __snake_case (__UpperCAmelCase ):
"""simple docstring"""
for i in range(len(__UpperCAmelCase ) ):
for j in range(len(__UpperCAmelCase ) ):
if board[i][j] == 1:
print('''Q''' , end=''' ''' )
else:
print('''.''' , end=''' ''' )
print()
# n=int(input("The no. of queens"))
__lowerCamelCase : int = 8
__lowerCamelCase : Tuple = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("""The total no. of solutions are :""", len(solution))
| 418 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__lowerCamelCase : Union[str, Any] = abspath(join(dirname(dirname(__file__)), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def __snake_case (__UpperCAmelCase ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(__UpperCAmelCase )
def __snake_case (__UpperCAmelCase ):
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_terminal_summary_main
lowerCamelCase_ : Tuple = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(__UpperCAmelCase , id=__UpperCAmelCase )
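# Hedged usage note (appended): with the hooks above wired in, report generation
# is toggled from the command line via the option read by `getoption`, e.g.
#   pytest tests/ --make-reports=my_run
# The flag name comes from the '--make-reports' string above; the rest is illustrative.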
| 418 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
_snake_case : Dict = logging.get_logger(__name__)
_snake_case : List[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_snake_case : List[Any] = {
'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'},
'tokenizer_file': {
'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'
},
}
_snake_case : List[Any] = {'mobilebert-uncased': 512}
_snake_case : Optional[int] = {}
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_INIT_CONFIGURATION
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = MobileBertTokenizer
def __init__( self : Any , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : Tuple="[UNK]" , lowerCAmelCase_ : Optional[int]="[SEP]" , lowerCAmelCase_ : Optional[int]="[PAD]" , lowerCAmelCase_ : Tuple="[CLS]" , lowerCAmelCase_ : List[Any]="[MASK]" , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Optional[Any]=None , **lowerCAmelCase_ : int , ) -> Optional[Any]:
super().__init__(
lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , tokenize_chinese_chars=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ , **lowerCAmelCase_ , )
__lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , lowerCAmelCase_ ) != do_lower_case
or normalizer_state.get('strip_accents' , lowerCAmelCase_ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , lowerCAmelCase_ ) != tokenize_chinese_chars
):
__lowerCAmelCase = getattr(lowerCAmelCase_ , normalizer_state.pop('type' ) )
__lowerCAmelCase = do_lower_case
__lowerCAmelCase = strip_accents
__lowerCAmelCase = tokenize_chinese_chars
__lowerCAmelCase = normalizer_class(**lowerCAmelCase_ )
__lowerCAmelCase = do_lower_case
def lowercase ( self : List[str] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any=None ) -> List[str]:
__lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
__lowerCAmelCase = [self.sep_token_id]
__lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase ( self : Tuple , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
__lowerCAmelCase = self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ )
return tuple(lowerCAmelCase_ )
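# Hedged sketch (appended, plain ints instead of real vocab ids): the layout the
# two methods above implement — [CLS] A [SEP] for one sequence and
# [CLS] A [SEP] B [SEP] with 0/1 token type ids for a pair.
cls_id, sep_id = 101, 102  # illustrative ids, not taken from the vocab above
a_ids, b_ids = [7, 8], [9]
input_ids = [cls_id] + a_ids + [sep_id] + b_ids + [sep_id]
token_type_ids = [0] * (len(a_ids) + 2) + [1] * (len(b_ids) + 1)
assert len(input_ids) == len(token_type_ids) == 6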
| 53 |
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase ( UpperCAmelCase__ : int):
lowerCamelCase : Optional[int] = str(UpperCAmelCase__)
return len(UpperCAmelCase__) == 9 and set(UpperCAmelCase__) == set('123456789')
def UpperCAmelCase ( ):
for base_num in range(99_99 , 49_99 , -1):
lowerCamelCase : Dict = 10_00_02 * base_num
if is_9_pandigital(UpperCAmelCase__):
return candidate
for base_num in range(3_33 , 99 , -1):
lowerCamelCase : Tuple = 1_00_20_03 * base_num
if is_9_pandigital(UpperCAmelCase__):
return candidate
return None
if __name__ == "__main__":
print(f"""{solution() = }""")
| 320 | 0 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
SCREAMING_SNAKE_CASE__ : Dict =16
SCREAMING_SNAKE_CASE__ : Any =32
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 16 , SCREAMING_SNAKE_CASE_ = "bert-base-cased" ) ->Dict:
_lowerCamelCase : Dict = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
_lowerCamelCase : Optional[int] = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(SCREAMING_SNAKE_CASE_ ):
# max_length=None => use the model max length (it's actually the default)
_lowerCamelCase : List[str] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_lowerCamelCase : List[str] = datasets.map(
SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=SCREAMING_SNAKE_CASE_ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_lowerCamelCase : Optional[Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(SCREAMING_SNAKE_CASE_ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(SCREAMING_SNAKE_CASE_ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(SCREAMING_SNAKE_CASE_ , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
_lowerCamelCase : Any = DataLoader(
tokenized_datasets['''train'''] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ )
_lowerCamelCase : int = DataLoader(
tokenized_datasets['''validation'''] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ )
return train_dataloader, eval_dataloader
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->str:
# Initialize accelerator
_lowerCamelCase : List[Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_lowerCamelCase : Any = config["""lr"""]
_lowerCamelCase : Dict = int(config['''num_epochs'''] )
_lowerCamelCase : List[Any] = int(config['''seed'''] )
_lowerCamelCase : List[str] = int(config['''batch_size'''] )
_lowerCamelCase : List[Any] = args.model_name_or_path
set_seed(SCREAMING_SNAKE_CASE_ )
_lowerCamelCase : List[str] = get_dataloaders(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_lowerCamelCase : Tuple = AutoModelForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ )
# Instantiate optimizer
_lowerCamelCase : Tuple = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_lowerCamelCase : List[str] = optimizer_cls(params=model.parameters() , lr=SCREAMING_SNAKE_CASE_ )
if accelerator.state.deepspeed_plugin is not None:
_lowerCamelCase : Any = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
_lowerCamelCase : Any = 1
_lowerCamelCase : str = (len(SCREAMING_SNAKE_CASE_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_lowerCamelCase : List[Any] = get_linear_schedule_with_warmup(
optimizer=SCREAMING_SNAKE_CASE_ , num_warmup_steps=0 , num_training_steps=SCREAMING_SNAKE_CASE_ , )
else:
_lowerCamelCase : Dict = DummyScheduler(SCREAMING_SNAKE_CASE_ , total_num_steps=SCREAMING_SNAKE_CASE_ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_lowerCamelCase : Dict = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# We need to keep track of how many total steps we have iterated over
_lowerCamelCase : str = 0
# We also need to keep track of the stating epoch so files are named properly
_lowerCamelCase : Tuple = 0
# Now we train the model
_lowerCamelCase : str = evaluate.load('''glue''' , '''mrpc''' )
_lowerCamelCase : int = 0
_lowerCamelCase : Tuple = {}
for epoch in range(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
model.train()
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
_lowerCamelCase : List[str] = model(**SCREAMING_SNAKE_CASE_ )
_lowerCamelCase : int = outputs.loss
_lowerCamelCase : List[Any] = loss / gradient_accumulation_steps
accelerator.backward(SCREAMING_SNAKE_CASE_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
_lowerCamelCase : List[Any] = 0
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(**SCREAMING_SNAKE_CASE_ )
_lowerCamelCase : Dict = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
_lowerCamelCase : str = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(SCREAMING_SNAKE_CASE_ ) - 1:
_lowerCamelCase : int = predictions[: len(eval_dataloader.dataset ) - samples_seen]
_lowerCamelCase : Union[str, Any] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=SCREAMING_SNAKE_CASE_ , references=SCREAMING_SNAKE_CASE_ , )
_lowerCamelCase : List[str] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , SCREAMING_SNAKE_CASE_ )
_lowerCamelCase : str = eval_metric["""accuracy"""]
if best_performance < eval_metric["accuracy"]:
_lowerCamelCase : Dict = eval_metric["""accuracy"""]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), F'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , '''all_results.json''' ) , '''w''' ) as f:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase ( ) ->int:
_lowerCamelCase : int = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''' , type=SCREAMING_SNAKE_CASE_ , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=SCREAMING_SNAKE_CASE_ , )
parser.add_argument(
'''--output_dir''' , type=SCREAMING_SNAKE_CASE_ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--performance_lower_bound''' , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help='''Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.''' , )
parser.add_argument(
'''--num_epochs''' , type=SCREAMING_SNAKE_CASE_ , default=3 , help='''Number of train epochs.''' , )
_lowerCamelCase : Optional[int] = parser.parse_args()
_lowerCamelCase : Any = {"""lr""": 2e-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main()
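# Hedged usage note (appended): a script like this is normally launched through
# the accelerate CLI; the config file name below is illustrative, the flags match
# the argparse definitions above.
#   accelerate launch --config_file ds_zero2_config.yaml this_script.py \
#       --model_name_or_path bert-base-cased --output_dir ./results --num_epochs 3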
| 714 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase=13 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=99 , _lowercase=32 , _lowercase=5 , _lowercase=4 , _lowercase=37 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=16 , _lowercase=2 , _lowercase=0.02 , _lowercase=4 , ) -> List[Any]:
_lowerCamelCase : str = parent
_lowerCamelCase : Dict = batch_size
_lowerCamelCase : Optional[Any] = seq_length
_lowerCamelCase : List[str] = is_training
_lowerCamelCase : Dict = use_attention_mask
_lowerCamelCase : Optional[Any] = use_token_type_ids
_lowerCamelCase : Tuple = use_labels
_lowerCamelCase : List[Any] = vocab_size
_lowerCamelCase : Union[str, Any] = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : Union[str, Any] = num_attention_heads
_lowerCamelCase : Tuple = intermediate_size
_lowerCamelCase : Union[str, Any] = hidden_act
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCamelCase : Tuple = max_position_embeddings
_lowerCamelCase : List[str] = type_vocab_size
_lowerCamelCase : List[str] = type_sequence_label_size
_lowerCamelCase : Union[str, Any] = initializer_range
_lowerCamelCase : str = num_choices
def a__ ( self ) -> Dict:
_lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase : List[Any] = None
if self.use_attention_mask:
_lowerCamelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : List[Any] = None
if self.use_token_type_ids:
_lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCamelCase : int = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def a__ ( self ) -> int:
_lowerCamelCase : Optional[Any] = self.prepare_config_and_inputs()
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = config_and_inputs
_lowerCamelCase : Tuple = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class _UpperCAmelCase ( a_ , unittest.TestCase ):
"""simple docstring"""
__snake_case = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def a__ ( self ) -> Any:
_lowerCamelCase : Union[str, Any] = FlaxAlbertModelTester(self )
@slow
def a__ ( self ) -> int:
for model_class_name in self.all_model_classes:
_lowerCamelCase : str = model_class_name.from_pretrained('''albert-base-v2''' )
_lowerCamelCase : int = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowercase )
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def a__ ( self ) -> Optional[int]:
_lowerCamelCase : Tuple = FlaxAlbertModel.from_pretrained('''albert-base-v2''' )
_lowerCamelCase : List[Any] = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
_lowerCamelCase : Union[str, Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_lowerCamelCase : str = model(_lowercase , attention_mask=_lowercase )[0]
_lowerCamelCase : List[str] = (1, 11, 768)
self.assertEqual(output.shape , _lowercase )
_lowerCamelCase : Dict = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _lowercase , atol=1E-4 ) )
| 558 | 0 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : str=0.999 , __UpperCamelCase : Dict="cosine" , ):
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(__UpperCamelCase : Optional[Any] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__UpperCamelCase : Optional[Any] ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'Unsupported alpha_tranform_type: {alpha_transform_type}' )
snake_case_ : Optional[int] = []
for i in range(__UpperCamelCase ):
snake_case_ : str = i / num_diffusion_timesteps
snake_case_ : Dict = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__UpperCamelCase ) / alpha_bar_fn(__UpperCamelCase ) , __UpperCamelCase ) )
return torch.tensor(__UpperCamelCase , dtype=torch.floataa )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = [e.name for e in KarrasDiffusionSchedulers]
_lowerCamelCase = 2
@register_to_config
def __init__( self , _lowercase = 1_0_0_0 , _lowercase = 0.0_0085 , _lowercase = 0.012 , _lowercase = "linear" , _lowercase = None , _lowercase = "epsilon" , _lowercase = "linspace" , _lowercase = 0 , ) -> Union[str, Any]:
'''simple docstring'''
if trained_betas is not None:
snake_case_ : Tuple = torch.tensor(_lowercase , dtype=torch.floataa )
elif beta_schedule == "linear":
snake_case_ : int = torch.linspace(_lowercase , _lowercase , _lowercase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
snake_case_ : Optional[int] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , _lowercase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
snake_case_ : List[str] = betas_for_alpha_bar(_lowercase )
else:
raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}' )
snake_case_ : Tuple = 1.0 - self.betas
snake_case_ : str = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(_lowercase , _lowercase , _lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None ) -> List[str]:
'''simple docstring'''
if schedule_timesteps is None:
snake_case_ : Optional[Any] = self.timesteps
snake_case_ : int = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
snake_case_ : Optional[Any] = 1 if len(_lowercase ) > 1 else 0
else:
snake_case_ : Optional[int] = timestep.cpu().item() if torch.is_tensor(_lowercase ) else timestep
snake_case_ : List[str] = self._index_counter[timestep_int]
return indices[pos].item()
@property
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def UpperCAmelCase__ ( self , _lowercase , _lowercase , ) -> torch.FloatTensor:
'''simple docstring'''
snake_case_ : str = self.index_for_timestep(_lowercase )
if self.state_in_first_order:
snake_case_ : Union[str, Any] = self.sigmas[step_index]
else:
snake_case_ : List[Any] = self.sigmas_interpol[step_index]
snake_case_ : Dict = sample / ((sigma**2 + 1) ** 0.5)
return sample
def UpperCAmelCase__ ( self , _lowercase , _lowercase = None , _lowercase = None , ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Any = num_inference_steps
snake_case_ : List[Any] = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
snake_case_ : Tuple = np.linspace(0 , num_train_timesteps - 1 , _lowercase , dtype=_lowercase )[::-1].copy()
elif self.config.timestep_spacing == "leading":
snake_case_ : int = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
snake_case_ : Any = (np.arange(0 , _lowercase ) * step_ratio).round()[::-1].copy().astype(_lowercase )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
snake_case_ : Tuple = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
snake_case_ : Dict = (np.arange(_lowercase , 0 , -step_ratio )).round().copy().astype(_lowercase )
timesteps -= 1
else:
raise ValueError(
f'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
snake_case_ : Dict = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
snake_case_ : List[str] = torch.from_numpy(np.log(_lowercase ) ).to(_lowercase )
snake_case_ : Optional[int] = np.interp(_lowercase , np.arange(0 , len(_lowercase ) ) , _lowercase )
snake_case_ : Union[str, Any] = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
snake_case_ : str = torch.from_numpy(_lowercase ).to(device=_lowercase )
# interpolate sigmas
snake_case_ : Union[str, Any] = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
snake_case_ : List[Any] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
snake_case_ : str = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(_lowercase ).startswith("""mps""" ):
# mps does not support float64
snake_case_ : str = torch.from_numpy(_lowercase ).to(_lowercase , dtype=torch.floataa )
else:
snake_case_ : Optional[int] = torch.from_numpy(_lowercase ).to(_lowercase )
# interpolate timesteps
snake_case_ : Tuple = self.sigma_to_t(_lowercase ).to(_lowercase , dtype=timesteps.dtype )
snake_case_ : str = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
snake_case_ : Any = torch.cat([timesteps[:1], interleaved_timesteps] )
snake_case_ : List[Any] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
snake_case_ : Union[str, Any] = defaultdict(_lowercase )
def UpperCAmelCase__ ( self , _lowercase ) -> Tuple:
'''simple docstring'''
snake_case_ : List[Any] = sigma.log()
# get distribution
snake_case_ : List[str] = log_sigma - self.log_sigmas[:, None]
# get sigmas range
snake_case_ : Optional[Any] = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
snake_case_ : int = low_idx + 1
snake_case_ : Tuple = self.log_sigmas[low_idx]
snake_case_ : List[str] = self.log_sigmas[high_idx]
# interpolate sigmas
snake_case_ : Optional[int] = (low - log_sigma) / (low - high)
snake_case_ : Optional[Any] = w.clamp(0 , 1 )
# transform interpolation to time range
snake_case_ : List[Any] = (1 - w) * low_idx + w * high_idx
snake_case_ : List[str] = t.view(sigma.shape )
return t
@property
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
return self.sample is None
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase = True , ) -> Union[SchedulerOutput, Tuple]:
'''simple docstring'''
snake_case_ : List[str] = self.index_for_timestep(_lowercase )
# advance index counter by 1
snake_case_ : List[Any] = timestep.cpu().item() if torch.is_tensor(_lowercase ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
snake_case_ : int = self.sigmas[step_index]
snake_case_ : Union[str, Any] = self.sigmas_interpol[step_index + 1]
snake_case_ : List[str] = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
snake_case_ : Optional[int] = self.sigmas[step_index - 1]
snake_case_ : Optional[int] = self.sigmas_interpol[step_index]
snake_case_ : int = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
snake_case_ : str = 0
snake_case_ : Optional[int] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
snake_case_ : str = sigma_hat if self.state_in_first_order else sigma_interpol
snake_case_ : Optional[Any] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
snake_case_ : Dict = sigma_hat if self.state_in_first_order else sigma_interpol
snake_case_ : int = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("""prediction_type not implemented yet: sample""" )
else:
raise ValueError(
f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
snake_case_ : List[str] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
snake_case_ : List[str] = sigma_interpol - sigma_hat
# store for 2nd order step
snake_case_ : List[Any] = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
snake_case_ : List[Any] = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
snake_case_ : Optional[int] = sigma_next - sigma_hat
snake_case_ : Dict = self.sample
snake_case_ : int = None
snake_case_ : Any = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_lowercase )
def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , ) -> torch.FloatTensor:
'''simple docstring'''
snake_case_ : Optional[Any] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_lowercase ):
# mps does not support float64
snake_case_ : List[Any] = self.timesteps.to(original_samples.device , dtype=torch.floataa )
snake_case_ : Any = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
snake_case_ : Any = self.timesteps.to(original_samples.device )
snake_case_ : Union[str, Any] = timesteps.to(original_samples.device )
snake_case_ : Optional[Any] = [self.index_for_timestep(_lowercase , _lowercase ) for t in timesteps]
snake_case_ : Any = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
snake_case_ : Union[str, Any] = sigma.unsqueeze(-1 )
snake_case_ : Optional[Any] = original_samples + noise * sigma
return noisy_samples
def __len__( self ) -> Union[str, Any]:
'''simple docstring'''
return self.config.num_train_timesteps
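# Hedged sketch (appended): the interpolated sigmas built in set_timesteps via
# sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp() are geometric means of
# neighbouring sigmas. Standalone check with illustrative values.
import torch

s = torch.tensor([10.0, 4.0, 1.0])
mid = s.log().lerp(s.roll(1).log(), 0.5).exp()
assert torch.allclose(mid[1], (s[0] * s[1]).sqrt())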
| 58 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_ = {
'''configuration_speecht5''': [
'''SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP''',
'''SpeechT5Config''',
'''SpeechT5HifiGanConfig''',
],
'''feature_extraction_speecht5''': ['''SpeechT5FeatureExtractor'''],
'''processing_speecht5''': ['''SpeechT5Processor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ['''SpeechT5Tokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'''SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SpeechT5ForSpeechToText''',
'''SpeechT5ForSpeechToSpeech''',
'''SpeechT5ForTextToSpeech''',
'''SpeechT5Model''',
'''SpeechT5PreTrainedModel''',
'''SpeechT5HifiGan''',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
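# Hedged usage note (appended): the _LazyModule above defers the heavy imports;
# a plain `from transformers.models.speecht5 import SpeechT5Processor` only
# triggers the real module import on first attribute access. Import path as in
# the public API; behaviour described per the lazy-module pattern above.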
| 373 | 0 |
from math import pow
def UpperCamelCase__ ( _A: int , _A: int , _A: int , _A: int , _A: int , ):
'''simple docstring'''
if current_sum == needed_sum:
# If the sum of the powers is equal to needed_sum, then we have a solution.
solutions_count += 1
return current_sum, solutions_count
__lowerCamelCase = int(pow(UpperCamelCase__ , UpperCamelCase__ ) )
if current_sum + i_to_n <= needed_sum:
# If the sum of the powers is less than needed_sum, then continue adding powers.
current_sum += i_to_n
__lowerCamelCase , __lowerCamelCase = backtrack(
UpperCamelCase__ , UpperCamelCase__ , current_number + 1 , UpperCamelCase__ , UpperCamelCase__ )
current_sum -= i_to_n
if i_to_n < needed_sum:
# If the power of i is less than needed_sum, then try with the next power.
__lowerCamelCase , __lowerCamelCase = backtrack(
UpperCamelCase__ , UpperCamelCase__ , current_number + 1 , UpperCamelCase__ , UpperCamelCase__ )
return current_sum, solutions_count
def UpperCamelCase__ ( _A: int , _A: int ):
'''simple docstring'''
if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
raise ValueError(
"""Invalid input\n"""
"""needed_sum must be between 1 and 1000, power between 2 and 10.""" )
return backtrack(UpperCamelCase__ , UpperCamelCase__ , 1 , 0 , 0 )[1] # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
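# Hedged sketch (appended): an independent brute-force count over distinct powers
# i**power (i = 1, 2, ...) to cross-check the backtracker above on a tiny case;
# with squares, only {1, 9} sums to 10.
from itertools import combinations

target, power = 10, 2
terms = [i**power for i in range(1, 4)]  # 1, 4, 9
count = sum(
    1
    for r in range(1, len(terms) + 1)
    for combo in combinations(terms, r)
    if sum(combo) == target
)
assert count == 1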
| 715 |
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_a : str = logging.getLogger(__name__)
_a : Optional[int] = 'Hello world! cécé herlolip'
_a : List[str] = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def UpperCamelCase__ ( _A: int , _A: List[str] ):
'''simple docstring'''
__lowerCamelCase = BertAbsConfig(
temp_dir=""".""" , finetune_bert=_A , large=_A , share_emb=_A , use_bert_emb=_A , encoder="""bert""" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
__lowerCamelCase = torch.load(_A , lambda _A , _A : storage )
__lowerCamelCase = AbsSummarizer(_A , torch.device("""cpu""" ) , _A )
original.eval()
__lowerCamelCase = BertAbsSummarizer(_A , torch.device("""cpu""" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("""convert the model""" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outputs are identical
# ----------------------------------
logging.info("""Make sure that the models' outputs are identical""" )
__lowerCamelCase = BertTokenizer.from_pretrained("""bert-base-uncased""" )
# prepare the model inputs
__lowerCamelCase = tokenizer.encode("""This is sample éàalj'-.""" )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_A )) )
__lowerCamelCase = torch.tensor(_A ).unsqueeze(0 )
__lowerCamelCase = tokenizer.encode("""This is sample 3 éàalj'-.""" )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_A )) )
__lowerCamelCase = torch.tensor(_A ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
__lowerCamelCase = encoder_input_ids
__lowerCamelCase = decoder_input_ids
__lowerCamelCase = __lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = __lowerCamelCase = None
__lowerCamelCase = __lowerCamelCase = None
__lowerCamelCase = None
# The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
__lowerCamelCase = original(_A , _A , _A , _A , _A , _A , _A )[0]
__lowerCamelCase = original.generator(_A )
__lowerCamelCase = new_model(
_A , _A , _A , _A , _A )[0]
__lowerCamelCase = new_model.generator(_A )
__lowerCamelCase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print("""Maximum absolute difference beween weights: {:.2f}""".format(_A ) )
__lowerCamelCase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print("""Maximum absolute difference beween weights: {:.2f}""".format(_A ) )
__lowerCamelCase = torch.allclose(_A , _A , atol=1e-3 )
if are_identical:
logging.info("""all weights are equal up to 1e-3""" )
else:
raise ValueError("""the weights are different. The new model is likely different from the original one.""" )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("""saving the model's state dictionary""" )
torch.save(
new_model.state_dict() , """./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin""" )
if __name__ == "__main__":
_a : Any = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
_a : Any = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
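# Hedged usage note (appended): typical invocation of this conversion script;
# the script name and both paths are illustrative, the flags match the argparse
# definitions above.
#   python convert_bertabs_original_pytorch_checkpoint.py \
#       --bertabs_checkpoint_path ./bertabs_cnndm.pt \
#       --pytorch_dump_folder_path ./bertabs-converted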
| 571 | 0 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = tmp_path / '''file.csv'''
snake_case_ = textwrap.dedent(
'''\
header1,header2
1,2
10,20
''' )
with open(SCREAMING_SNAKE_CASE__ , '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
return str(SCREAMING_SNAKE_CASE__ )
@pytest.fixture
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = tmp_path / '''malformed_file.csv'''
snake_case_ = textwrap.dedent(
'''\
header1,header2
1,2
10,20,
''' )
with open(SCREAMING_SNAKE_CASE__ , '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
return str(SCREAMING_SNAKE_CASE__ )
@pytest.fixture
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = tmp_path / '''csv_with_image.csv'''
snake_case_ = textwrap.dedent(
F'''\
image
{image_file}
''' )
with open(SCREAMING_SNAKE_CASE__ , '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
return str(SCREAMING_SNAKE_CASE__ )
@pytest.fixture
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = tmp_path / '''csv_with_label.csv'''
snake_case_ = textwrap.dedent(
'''\
label
good
bad
good
''' )
with open(SCREAMING_SNAKE_CASE__ , '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
return str(SCREAMING_SNAKE_CASE__ )
@pytest.fixture
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = tmp_path / '''csv_with_int_list.csv'''
snake_case_ = textwrap.dedent(
'''\
int_list
1 2 3
4 5 6
7 8 9
''' )
with open(SCREAMING_SNAKE_CASE__ , '''w''' ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
return str(SCREAMING_SNAKE_CASE__ )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = Csv()
snake_case_ = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(SCREAMING_SNAKE_CASE__ , match='''Error tokenizing data''' ):
for _ in generator:
pass
assert any(
record.levelname == '''ERROR'''
and '''Failed to read file''' in record.message
and os.path.basename(SCREAMING_SNAKE_CASE__ ) in record.message
for record in caplog.records )
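# Hedged sketch (appended): the `converters` mechanism exercised in the int_list
# test further below, shown directly with pandas (which the Csv builder delegates
# to); the inline data is illustrative.
import pandas as pd
from io import StringIO

demo_df = pd.read_csv(
    StringIO("int_list\n1 2 3\n"),
    converters={"int_list": lambda x: [int(i) for i in x.split()]},
)
assert demo_df["int_list"][0] == [1, 2, 3]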
@require_pil
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
with open(SCREAMING_SNAKE_CASE__ , encoding='''utf-8''' ) as f:
snake_case_ = f.read().splitlines()[1]
snake_case_ = Csv(encoding='''utf-8''' , features=Features({'''image''': Image()} ) )
snake_case_ = csv._generate_tables([[csv_file_with_image]] )
snake_case_ = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''image''' ).type == Image()()
snake_case_ = pa_table.to_pydict()['''image''']
assert generated_content == [{"path": image_file, "bytes": None}]
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
with open(SCREAMING_SNAKE_CASE__ , encoding='''utf-8''' ) as f:
snake_case_ = f.read().splitlines()[1:]
snake_case_ = Csv(encoding='''utf-8''' , features=Features({'''label''': ClassLabel(names=['''good''', '''bad'''] )} ) )
snake_case_ = csv._generate_tables([[csv_file_with_label]] )
snake_case_ = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''label''' ).type == ClassLabel(names=['''good''', '''bad'''] )()
snake_case_ = pa_table.to_pydict()['''label''']
assert generated_content == [ClassLabel(names=['''good''', '''bad'''] ).straint(SCREAMING_SNAKE_CASE__ ) for label in labels]
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = Csv(encoding='''utf-8''' , sep=''',''' , converters={'''int_list''': lambda SCREAMING_SNAKE_CASE__ : [int(SCREAMING_SNAKE_CASE__ ) for i in x.split()]} )
snake_case_ = csv._generate_tables([[csv_file_with_int_list]] )
snake_case_ = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('''int_list''' ).type )
snake_case_ = pa_table.to_pydict()['''int_list''']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]] | 39 | """simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """A SentencePiece-style Unigram tokenizer built on the `tokenizers` library."""

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given iterator."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        # Patch the serialized model so the Unigram model knows the id of the <unk> token.
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
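# --- Example (added for illustration, not part of the original file) ---
# A minimal sketch of training the tokenizer above on a toy in-memory corpus;
# the sentences are made up, everything else uses the `tokenizers` API as above.
if __name__ == "__main__":
    sp_tokenizer = SentencePieceUnigramTokenizer()
    sp_tokenizer.train_from_iterator(
        iter(["the quick brown fox", "jumps over the lazy dog"]), vocab_size=100, show_progress=False
    )
    # The post-processor appends "</s>" (id 1) to every encoded sequence.
    print(sp_tokenizer.encode("the quick fox").ids)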
| 232 | 0 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
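# --- Example (added for illustration, not part of the original file) ---
# A hedged sketch of typical usage; "speechbrain/m-ctc-t-large" is the public
# M-CTC-T checkpoint, and `raw_audio` stands in for a real waveform.
#
#   import numpy as np
#   from transformers import MCTCTProcessor
#
#   processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
#   raw_audio = np.zeros(16000, dtype=np.float32)  # 1 second of silence at 16 kHz
#   inputs = processor(audio=raw_audio, sampling_rate=16000, return_tensors="pt")
#   labels = processor(text="hello world", return_tensors="pt").input_ids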
| 78 |
"""simple docstring"""
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]

    format_json_to_md(input_json_file, output_md_file)
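# --- Example (added for illustration, not part of the original file) ---
# Given an input JSON like:
#   {"benchmarks/benchmark_map.json": {"time": {"new": 1.5, "old": 2.0, "diff": -0.5}}}
# the script above emits a collapsible markdown section with one table per benchmark:
#   ### Benchmark: benchmark_map.json
#   | metric | time |
#   |--------|---|
#   | new / old (diff) | 1.500000 / 2.000000 (-0.500000) |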
| 78 | 1 |
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
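# --- Example (added for illustration, not part of the original file) ---
# A hedged sketch of `find_executable_batch_size` re-exported above: the decorated
# function is retried with a smaller batch size whenever it raises CUDA out-of-memory.
#
#   from accelerate.utils import find_executable_batch_size
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size):
#       ...  # build dataloaders with `batch_size` and run the training loop
#
#   train()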
| 85 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
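# --- Example (added for illustration, not part of the original file) ---
# Instantiating the config defined above with its defaults; `down_ops` is derived
# from `key_dim` and `hidden_sizes` in `__init__`, e.g. 128 // 16 == 8 below.
if __name__ == "__main__":
    config = LevitConfig()
    print(config.model_type)   # "levit"
    print(config.down_ops[0])  # ["Subsample", 16, 8, 4, 2, 2]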
| 85 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 714 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-small-librispeech-asr": (
        "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}


class Speech2TextConfig(PretrainedConfig):
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        encoder_layers=12,
        encoder_ffn_dim=2048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6000,
        max_target_positions=1024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 83 | 0 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])

    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")

    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)
        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                tokenizer = tokenizer_class.from_pretrained(tmp_dir)

                self.assertTrue(tokenizer.decode([255]) == "")

    # tokenizer can be instantiated without any pretrained files, so no need for a pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have a vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string and not just on single characters
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
        # and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)

                self.assertIsInstance(string, str)

    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
| 90 |
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')

    parser.add_argument(
        '--txt2img_unclip',
        default='kakaobrain/karlo-v1-alpha',
        type=str,
        required=False,
        help='The pretrained txt2img unclip.',
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
| 342 | 0 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith("<PIL.Image.Image image mode=RGB size=100x100 at")


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    res = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, res).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
| 501 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error


# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")

if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])


def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
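# --- Example (added for illustration, not part of the original file) ---
# A small sketch of the registry above: aliases resolve to main type names and
# `get_formatter` instantiates the registered Formatter class.
#
#   from datasets.formatting import get_format_type_from_alias, get_formatter
#
#   assert get_format_type_from_alias("np") == "numpy"
#   formatter = get_formatter("numpy")  # a NumpyFormatter instance
#   formatter = get_formatter("torch")  # TorchFormatter, or raises if torch is missing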
| 501 | 1 |
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n]"""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler

    phases = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phases, -2 * pi))
    plt.show()
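# --- Example (added for illustration, not part of the original file) ---
# A minimal object satisfying the FilterType protocol above: a two-tap moving
# average, which acts as a simple low-pass. Any class with a matching `process`
# method works; the 48 kHz sample rate below is an arbitrary choice.
class MovingAverageFilter:
    def __init__(self) -> None:
        self.prev_sample = 0.0

    def process(self, sample: float) -> float:
        # y[n] = (x[n] + x[n - 1]) / 2
        out = (sample + self.prev_sample) / 2
        self.prev_sample = sample
        return out


if __name__ == "__main__":
    # Plots the gentle high-frequency roll-off of the averager.
    show_frequency_response(MovingAverageFilter(), 48000)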
| 443 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard


def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path


def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False


@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()


@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)


@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()


def test_fs_overwrites():
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
| 443 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Treats the curve as a collection of linear segments and sums the area of the
    trapezium shapes they form."""
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
| 659 |
"""simple docstring"""
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    # Transpose the rows of source data into per-attribute columns.
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)

        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)

        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)

        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)

        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
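# --- Example (added for illustration, not part of the original file) ---
# Toy data: three cars as [price, mileage, registration_year] with weights
# 0 = lower is better and 1 = higher is better. Each row gains a final score column.
if __name__ == "__main__":
    vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
    print(procentual_proximity(vehicles, [0, 0, 1]))
    # [[20, 60, 2012, 2.0], [23, 90, 2015, 1.0], [22, 50, 2011, 1.3333333333333335]]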
| 659 | 1 |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce TensorFlow log verbosity
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
| 62 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
_lowerCamelCase : Dict = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None
    ) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple]] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
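# --- Example (added for illustration, not part of the original file) ---
# A hedged sketch: preprocessing a dummy RGB image with the processor defined above.
# The defaults resize the short edge to 224, center-crop (with padding) to 256x256,
# rescale to [0, 1], and flip RGB -> BGR (the pretrained checkpoints expect BGR).
#
#   import numpy as np
#
#   processor = MobileViTImageProcessor()
#   image = np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8)
#   batch = processor(images=image, return_tensors="np")
#   print(batch["pixel_values"].shape)  # (1, 3, 256, 256)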
| 184 | 0 |
"""simple docstring"""
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class _UpperCAmelCase :
def __init__( self , snake_case_ , snake_case_=99 , snake_case_=13 , snake_case_=7 , snake_case_=9 , snake_case_=True , snake_case_=True , snake_case_=False , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=37 , snake_case_=8 , snake_case_=0.1 , snake_case_=0.002 , snake_case_=1 , snake_case_=0 , snake_case_=0 , snake_case_=None , snake_case_=None , ):
_snake_case : Union[str, Any] = parent
_snake_case : int = batch_size
_snake_case : Any = encoder_seq_length
_snake_case : str = decoder_seq_length
# For common tests
_snake_case : int = self.decoder_seq_length
_snake_case : Optional[Any] = is_training
_snake_case : List[str] = use_attention_mask
_snake_case : List[Any] = use_labels
_snake_case : Optional[int] = vocab_size
_snake_case : List[Any] = hidden_size
_snake_case : str = num_hidden_layers
_snake_case : Any = num_attention_heads
_snake_case : int = d_ff
_snake_case : Any = relative_attention_num_buckets
_snake_case : int = dropout_rate
_snake_case : Union[str, Any] = initializer_factor
_snake_case : List[Any] = eos_token_id
_snake_case : Optional[int] = pad_token_id
_snake_case : Optional[Any] = decoder_start_token_id
_snake_case : Tuple = None
_snake_case : Dict = decoder_layers
def lowerCamelCase__ ( self ):
return TaConfig.from_pretrained("google/umt5-base" )
def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , ):
if attention_mask is None:
_snake_case : Dict = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
_snake_case : int = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
_snake_case : int = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=snake_case_ )
if decoder_head_mask is None:
_snake_case : Dict = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=snake_case_ )
if cross_attn_head_mask is None:
_snake_case : List[Any] = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=snake_case_ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def lowerCamelCase__ ( self ):
_snake_case : List[str] = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
_snake_case : Optional[int] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in an incorrect seq_length, which in turn results in
# position_ids being off by num_pad_tokens in past input
_snake_case : Optional[int] = input_ids.clamp(self.pad_token_id + 1 )
_snake_case : Tuple = decoder_input_ids.clamp(self.pad_token_id + 1 )
_snake_case : Optional[Any] = self.get_config()
_snake_case : Dict = config.num_attention_heads
_snake_case : Optional[Any] = self.prepare_inputs_dict(snake_case_ , snake_case_ , snake_case_ )
return config, input_dict
def lowerCamelCase__ ( self ):
_snake_case , _snake_case : Optional[int] = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCamelCase__ ( self ):
return TaConfig(
vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowerCamelCase__ ( self ):
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
_snake_case : Optional[int] = UMTaModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_snake_case : Tuple = model(
input_ids=snake_case_ , decoder_input_ids=snake_case_ , attention_mask=snake_case_ , decoder_attention_mask=snake_case_ , )
_snake_case : Dict = model(input_ids=snake_case_ , decoder_input_ids=snake_case_ )
_snake_case : int = result.last_hidden_state
_snake_case : Any = result.past_key_values
_snake_case : Tuple = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(snake_case_ ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
_snake_case : Union[str, Any] = UMTaModel(config=snake_case_ ).get_decoder().to(snake_case_ ).eval()
# first forward pass
_snake_case : Optional[int] = model(snake_case_ , use_cache=snake_case_ )
_snake_case : Optional[Any] = model(snake_case_ )
_snake_case : Union[str, Any] = model(snake_case_ , use_cache=snake_case_ )
self.parent.assertTrue(len(snake_case_ ) == len(snake_case_ ) )
self.parent.assertTrue(len(snake_case_ ) == len(snake_case_ ) + 1 )
_snake_case , _snake_case : Any = outputs.to_tuple()
# create hypothetical next token and extend to next_input_ids
_snake_case : List[str] = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append the next token to input_ids
_snake_case : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 )
_snake_case : str = model(snake_case_ )["last_hidden_state"]
_snake_case : Any = model(snake_case_ , past_key_values=snake_case_ )["last_hidden_state"]
# select random slice
_snake_case : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_snake_case : int = output_from_no_past[:, -1, random_slice_idx].detach()
_snake_case : int = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case_ , snake_case_ , atol=1E-3 ) )
def lowerCamelCase__ ( self , snake_case_ , snake_case_ , ):
_snake_case : Optional[Any] = UMTaModel(config=snake_case_ ).to(snake_case_ ).half().eval()
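# a forward pass in half precision should finish without overflowing to NaN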
_snake_case : Dict = model(**snake_case_ )["last_hidden_state"]
self.parent.assertFalse(torch.isnan(snake_case_ ).any().item() )
@require_torch
class _UpperCAmelCase ( _snake_case , _snake_case , _snake_case , unittest.TestCase):
__lowercase : List[str] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
__lowercase : Optional[int] = (UMTaForConditionalGeneration,) if is_torch_available() else ()
__lowercase : str = (
{
"""conversational""": UMTaForConditionalGeneration,
"""feature-extraction""": UMTaModel,
"""summarization""": UMTaForConditionalGeneration,
"""text2text-generation""": UMTaForConditionalGeneration,
"""translation""": UMTaForConditionalGeneration,
"""question-answering""": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
__lowercase : List[Any] = True
__lowercase : int = False
__lowercase : Tuple = False
__lowercase : Optional[int] = True
__lowercase : Union[str, Any] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
__lowercase : Union[str, Any] = [0.8, 0.9]
def lowerCamelCase__ ( self ):
_snake_case : int = UMTaModelTester(self )
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
def lowerCamelCase__ ( self ):
_snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
_snake_case : Tuple = UMTaModel(config_and_inputs[0] ).to(snake_case_ )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
snake_case_ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'{tmpdirname}/t5_test.onnx' , export_params=snake_case_ , opset_version=9 , input_names=["input_ids", "decoder_input_ids"] , )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def lowerCamelCase__ ( self ):
_snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*snake_case_ )
def lowerCamelCase__ ( self ):
_snake_case : Union[str, Any] = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
_snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
_snake_case : Union[str, Any] = config_and_inputs[0]
_snake_case : int = UMTaForConditionalGeneration(snake_case_ ).eval()
model.to(snake_case_ )
_snake_case : Union[str, Any] = {
"head_mask": torch.zeros(config.num_layers , config.num_heads , device=snake_case_ ),
"decoder_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=snake_case_ ),
"cross_attn_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=snake_case_ ),
}
for attn_name, (name, mask) in zip(snake_case_ , head_masking.items() ):
_snake_case : int = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
_snake_case : List[Any] = torch.ones(
config.num_decoder_layers , config.num_heads , device=snake_case_ )
_snake_case : List[str] = model.generate(
config_and_inputs[1]["input_ids"] , num_beams=1 , max_length=3 , output_attentions=snake_case_ , return_dict_in_generate=snake_case_ , **snake_case_ , )
# We check the state of decoder_attentions and cross_attentions just from the last step
_snake_case : int = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def lowerCamelCase__ ( self ):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( unittest.TestCase):
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
def lowerCamelCase__ ( self ):
_snake_case : Tuple = UMTaForConditionalGeneration.from_pretrained("google/umt5-small" , return_dict=snake_case_ ).to(snake_case_ )
_snake_case : Union[str, Any] = AutoTokenizer.from_pretrained("google/umt5-small" , use_fast=snake_case_ , legacy=snake_case_ )
_snake_case : List[Any] = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
_snake_case : Optional[Any] = tokenizer(snake_case_ , return_tensors="pt" , padding=snake_case_ ).input_ids
# fmt: off
_snake_case : List[str] = torch.tensor(
[
        [3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55, 2_74, 1, 0, 0, 0, 0, 0, 0, 0, 0],
        [5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33, 6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96, 2_74, 1],
] )
# fmt: on
torch.testing.assert_allclose(snake_case_ , snake_case_ )
_snake_case : Optional[int] = model.generate(input_ids.to(snake_case_ ) )
_snake_case : str = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ <extra_id_56>ajลกietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajลกie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> ํผํด[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
_snake_case : Dict = tokenizer.batch_decode(snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
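# A short, hedged sketch of the sentinel-filling generation exercised by the
# integration test above; the checkpoint name comes from the test, the prompt
# is illustrative.
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("google/umt5-small")
model = UMTaForConditionalGeneration.from_pretrained("google/umt5-small")
input_ids = tokenizer("A <extra_id_0> walks into a bar.", return_tensors="pt").input_ids
with torch.no_grad():
    filled = model.generate(input_ids, max_new_tokens=20)
# sentinel tokens (<extra_id_*>) are kept in the decoded output, as in the expected strings above
print(tokenizer.batch_decode(filled))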
| 87 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[Any] = ["""BartphoTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
_a : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
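# Hedged illustration of what the lazy-module indirection above provides: the
# tokenizer class is only materialized on first attribute access, and only if
# sentencepiece is installed. The import path is assumed from this module's name.
import importlib

bartpho = importlib.import_module("transformers.models.bartpho")
BartphoTokenizer = getattr(bartpho, "BartphoTokenizer")  # triggers _LazyModule resolution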
| 87 | 1 |
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a :Optional[int] = logging.get_logger(__name__)
a :str = {"vocab_file": "spiece.model"}
a :Dict = {
"vocab_file": {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
),
}
}
a :Any = {
"google/bigbird-roberta-base": 4_096,
"google/bigbird-roberta-large": 4_096,
"google/bigbird-base-trivia-itc": 4_096,
}
class __a (UpperCamelCase_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Dict = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE :int = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE :Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE :List[str] = ["""input_ids""", """attention_mask"""]
_SCREAMING_SNAKE_CASE :List[int] = []
def __init__( self , _a , _a="<unk>" , _a="<s>" , _a="</s>" , _a="<pad>" , _a="[SEP]" , _a="[MASK]" , _a="[CLS]" , _a = None , **_a , ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else bos_token
SCREAMING_SNAKE_CASE__ : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token
SCREAMING_SNAKE_CASE__ : List[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else unk_token
SCREAMING_SNAKE_CASE__ : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token
SCREAMING_SNAKE_CASE__ : Optional[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cls_token
SCREAMING_SNAKE_CASE__ : str = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE__ : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
SCREAMING_SNAKE_CASE__ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
SCREAMING_SNAKE_CASE__ : Any = vocab_file
SCREAMING_SNAKE_CASE__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase_ )
@property
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
return self.sp_model.get_piece_size()
def _a ( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.__dict__.copy()
SCREAMING_SNAKE_CASE__ : Dict = None
return state
def __setstate__( self , _a ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
SCREAMING_SNAKE_CASE__ : Any = {}
SCREAMING_SNAKE_CASE__ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _a ( self , _a ) -> Union[str, Any]:
"""simple docstring"""
return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )
def _a ( self , _a ) -> List[str]:
"""simple docstring"""
return self.sp_model.piece_to_id(UpperCamelCase_ )
def _a ( self , _a ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.sp_model.IdToPiece(UpperCamelCase_ )
return token
def _a ( self , _a ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = []
SCREAMING_SNAKE_CASE__ : Optional[Any] = ''
SCREAMING_SNAKE_CASE__ : str = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCamelCase_ ) + token
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
SCREAMING_SNAKE_CASE__ : str = []
else:
current_sub_tokens.append(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ : Tuple = False
out_string += self.sp_model.decode(UpperCamelCase_ )
return out_string.strip()
def _a ( self , _a , _a = False , _a = None , _a = True , **_a , ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = kwargs.pop("""use_source_tokenizer""" , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ : List[Any] = self.convert_ids_to_tokens(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
SCREAMING_SNAKE_CASE__ : List[str] = []
SCREAMING_SNAKE_CASE__ : Any = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCamelCase_ ) )
SCREAMING_SNAKE_CASE__ : Dict = []
sub_texts.append(UpperCamelCase_ )
else:
current_sub_text.append(UpperCamelCase_ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCamelCase_ ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
SCREAMING_SNAKE_CASE__ : List[str] = re.sub(r""" (\[(MASK|SEP)\])""" , r"""\1""" , """ """.join(UpperCamelCase_ ) )
else:
SCREAMING_SNAKE_CASE__ : str = ''.join(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
SCREAMING_SNAKE_CASE__ : str = self.clean_up_tokenization(UpperCamelCase_ )
return clean_text
else:
return text
def _a ( self , _a , _a = None ) -> List[Any]:
"""simple docstring"""
if not os.path.isdir(UpperCamelCase_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE__ : Tuple = os.path.join(
UpperCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase_ , """wb""" ) as fi:
SCREAMING_SNAKE_CASE__ : str = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_ )
return (out_vocab_file,)
def _a ( self , _a , _a = None ) -> List[str]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : int = [self.cls_token_id]
SCREAMING_SNAKE_CASE__ : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def _a ( self , _a , _a = None , _a = False ) -> int:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1] + ([0] * len(UpperCamelCase_ )) + [1]
def _a ( self , _a , _a = None ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
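# A hedged usage sketch; "BigBirdTokenizer" is the public name suggested by the
# pretrained map above (an assumption), and the checkpoint is one of its entries.
from transformers import BigBirdTokenizer

tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
encoded = tokenizer("Paris is the [MASK] of France.")
# build_inputs_with_special_tokens above frames a single sequence as [CLS] ... [SEP]
print(tokenizer.decode(encoded["input_ids"]))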
| 680 |
"""simple docstring"""
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def lowerCamelCase ( ):
UpperCAmelCase__ : List[Any] = argparse.ArgumentParser()
parser.add_argument('--model_ckpt' ,type=_snake_case ,default='microsoft/unixcoder-base-nine' )
parser.add_argument('--num_epochs' ,type=_snake_case ,default=5 )
parser.add_argument('--batch_size' ,type=_snake_case ,default=6 )
parser.add_argument('--gradient_accumulation_steps' ,type=_snake_case ,default=1 )
parser.add_argument('--freeze' ,type=_snake_case ,default=_snake_case )
parser.add_argument('--learning_rate' ,type=_snake_case ,default=5e-4 )
parser.add_argument('--seed' ,type=_snake_case ,default=0 )
parser.add_argument('--lr_scheduler_type' ,type=_snake_case ,default='cosine' )
parser.add_argument('--num_warmup_steps' ,type=_snake_case ,default=10 )
parser.add_argument('--weight_decay' ,type=_snake_case ,default=0.01 )
parser.add_argument('--output_dir' ,type=_snake_case ,default='./results' )
return parser.parse_args()
UpperCamelCase__ = load('accuracy')
def lowerCamelCase ( _snake_case ):
UpperCAmelCase__ , UpperCAmelCase__ : int = eval_pred
UpperCAmelCase__ : Optional[int] = np.argmax(_snake_case ,axis=1 )
return metric.compute(predictions=_snake_case ,references=_snake_case )
class a ( lowercase ):
def __init__( self , UpperCamelCase_ ):
super().__init__()
UpperCAmelCase__ : List[str] = trainer
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ):
if control.should_evaluate:
UpperCAmelCase__ : int = deepcopy(UpperCamelCase_ )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='train' )
return control_copy
def lowerCamelCase ( ):
UpperCAmelCase__ : int = get_args()
set_seed(args.seed )
UpperCAmelCase__ : Optional[int] = load_dataset('codeparrot/codecomplex' ,split='train' )
UpperCAmelCase__ : Tuple = dataset.train_test_split(test_size=0.2 )
UpperCAmelCase__ : List[Any] = train_test['test'].train_test_split(test_size=0.5 )
UpperCAmelCase__ : Tuple = DatasetDict(
{
'train': train_test['train'],
'test': test_validation['train'],
'valid': test_validation['test'],
} )
print('Loading tokenizer and model' )
UpperCAmelCase__ : str = AutoTokenizer.from_pretrained(args.model_ckpt )
UpperCAmelCase__ : Optional[Any] = tokenizer.eos_token
UpperCAmelCase__ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt ,num_labels=7 )
UpperCAmelCase__ : Dict = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : Optional[Any] = ClassLabel(num_classes=7 ,names=list(set(train_test_validation['train']['complexity'] ) ) )
def tokenize(_snake_case ):
UpperCAmelCase__ : Dict = tokenizer(example['src'] ,truncation=_snake_case ,max_length=1024 )
UpperCAmelCase__ : Any = labels.str2int(example['complexity'] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
UpperCAmelCase__ : List[Any] = train_test_validation.map(
_snake_case ,batched=_snake_case ,remove_columns=train_test_validation['train'].column_names ,)
UpperCAmelCase__ : Tuple = DataCollatorWithPadding(tokenizer=_snake_case )
UpperCAmelCase__ : Union[str, Any] = TrainingArguments(
output_dir=args.output_dir ,learning_rate=args.learning_rate ,lr_scheduler_type=args.lr_scheduler_type ,evaluation_strategy='epoch' ,save_strategy='epoch' ,logging_strategy='epoch' ,per_device_train_batch_size=args.batch_size ,per_device_eval_batch_size=args.batch_size ,num_train_epochs=args.num_epochs ,gradient_accumulation_steps=args.gradient_accumulation_steps ,weight_decay=0.01 ,metric_for_best_model='accuracy' ,run_name='complexity-java' ,report_to='wandb' ,)
UpperCAmelCase__ : Dict = Trainer(
model=_snake_case ,args=_snake_case ,train_dataset=tokenized_datasets['train'] ,eval_dataset=tokenized_datasets['valid'] ,tokenizer=_snake_case ,data_collator=_snake_case ,compute_metrics=_snake_case ,)
print('Training...' )
trainer.add_callback(CustomCallback(_snake_case ) )
trainer.train()
if __name__ == "__main__":
main()
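# Example invocation (illustrative: the script name is an assumption, the flags
# are the ones defined in get_args above):
#   python train_complexity_predictor.py \
#       --model_ckpt microsoft/unixcoder-base-nine \
#       --num_epochs 5 --batch_size 6 --output_dir ./results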
| 110 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class lowerCamelCase_ ( lowercase ):
"""simple docstring"""
_lowerCAmelCase : Optional[int] = "Salesforce/blip-image-captioning-base"
_lowerCAmelCase : str = (
"This is a tool that generates a description of an image. It takes an input named `image` which should be the "
"image to caption, and returns a text that contains the description in English."
)
_lowerCAmelCase : Optional[Any] = "image_captioner"
_lowerCAmelCase : int = AutoModelForVisionaSeq
_lowerCAmelCase : List[Any] = ["image"]
_lowerCAmelCase : List[Any] = ["text"]
def __init__( self , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(self , ["vision"] )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCAmelCase__ ( self , UpperCAmelCase__ ):
return self.pre_processor(images=UpperCAmelCase__ , return_tensors="pt" )
def lowerCAmelCase__ ( self , UpperCAmelCase__ ):
return self.model.generate(**UpperCAmelCase__ )
def lowerCAmelCase__ ( self , UpperCAmelCase__ ):
return self.pre_processor.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )[0].strip()
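# A hedged usage sketch for the tool above; the class name is the one defined
# in this file and the image path is illustrative. Instantiation with no
# arguments is assumed to fall back to the tool's default checkpoint.
from PIL import Image

captioner = lowerCamelCase_()
caption = captioner(Image.open("photo.jpg"))  # encode -> forward -> decode, as defined above
print(caption)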
| 112 |
"""simple docstring"""
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class lowerCamelCase_ ( lowercase ):
"""simple docstring"""
_lowerCAmelCase : List[Any] = CustomTokenizer
pass
| 112 | 1 |