import argparse
import logging
import os
import re

import tensorflow as tf

from transformers import (
    AutoConfig,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    PushToHubCallback,
    TFAutoModelForMaskedLM,
    create_optimizer,
)


logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE


def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config",
        type=str,
        default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer",
        type=str,
        default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument(
        "--per_replica_batch_size",
        type=int,
        default=8,
        help="Batch size per TPU core.",
    )
    parser.add_argument(
        "--no_tpu",
        action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name",
        type=str,
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
        default="local",
    )
    parser.add_argument(
        "--tpu_zone",
        type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )
    parser.add_argument(
        "--bfloat16",
        action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset",
        type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size",
        type=int,
        default=2**18,
        help="Size of the shuffle buffer (in samples)",
    )
    parser.add_argument(
        "--eval_dataset",
        type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of epochs to train for.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Learning rate to use for training.",
    )
    parser.add_argument(
        "--weight_decay_rate",
        type=float,
        default=1e-3,
        help="Weight decay rate to use for training.",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument(
        "--mlm_probability",
        type=float,
        default=0.15,
        help="Fraction of tokens to mask during training.",
    )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args


def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)

    return tpu


def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        # Shard filenames are expected to end in "-<shard_index>-<num_samples>.tfrecord"
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples


def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset


def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")

    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)


if __name__ == "__main__":
    args = parse_args()
    main(args)
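
# Usage sketch (illustrative; the script filename, bucket paths, and tokenizer repo below are
# assumptions, not part of the original file). count_samples() expects shard filenames ending in
# "-<shard_index>-<num_samples>.tfrecord", as produced by prepare_tfrecord_shards.py:
#
#   python run_mlm.py \
#       --tokenizer unigram-tokenizer-wikitext \
#       --pretrained_model_config roberta-base \
#       --train_dataset gs://my-bucket/train \
#       --eval_dataset gs://my-bucket/eval \
#       --tpu_name local \
#       --bfloat16 \
#       --output_dir ./mlm_model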
import contextlib
import importlib
import io
import unittest
import unittest.mock

import transformers

# Try to import everything from transformers to ensure every object can be loaded.
from transformers import *  # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available


if is_torch_available():
    from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification

if is_tf_available():
    from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification

if is_flax_available():
    from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification


MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
# Sha-256 of pytorch_model.bin on the top of `main`, for checking purposes


@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")


class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None


class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
from collections import OrderedDict
from typing import List, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
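
# Usage sketch (illustrative), assuming the class is exported as transformers.EfficientNetConfig:
#
#   from transformers import EfficientNetConfig
#
#   config = EfficientNetConfig()  # defaults correspond to EfficientNet-B7
#   # num_hidden_layers is derived rather than passed in: sum(num_block_repeats) * 4
#   assert config.num_hidden_layers == sum(config.num_block_repeats) * 4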
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
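
# Usage sketch (illustrative), assuming the class is exported as transformers.ASTConfig:
#
#   from transformers import ASTConfig
#
#   config = ASTConfig(max_length=1024, num_mel_bins=128)
#   config.save_pretrained("./ast-config")
#   assert ASTConfig.from_pretrained("./ast-config").num_mel_bins == 128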
import enum
import warnings

from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

logger = logging.get_logger(__name__)


class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1


@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def check_inputs(self, input_length, min_length, max_length):
        # Subclasses override this to warn about suspicious length settings.
        return True

    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records


@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})"
            )


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
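
# Usage sketch (illustrative; the checkpoint names below are examples, not requirements).
# These classes are normally constructed through the `pipeline` factory rather than directly:
#
#   from transformers import pipeline
#
#   summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
#   print(summarizer("A long article ...", max_length=60)[0]["summary_text"])
#
#   translator = pipeline("translation_en_to_fr", model="t5-small")
#   print(translator("How are you?")[0]["translation_text"])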
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # Mean-pool the token embeddings, ignoring padding positions
        embs = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs), embs
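
# Usage sketch (illustrative; the checkpoint name comes from the upstream M-CLIP project and
# is an assumption here, as is loading via from_pretrained):
#
#   from transformers import AutoTokenizer
#
#   name = "M-CLIP/XLM-Roberta-Large-Vit-B-32"
#   tokenizer = AutoTokenizer.from_pretrained(name)
#   model = MultilingualCLIP.from_pretrained(name)
#   batch = tokenizer(["a photo of a cat"], return_tensors="pt", padding=True)
#   text_embs, token_embs = model(batch["input_ids"], batch["attention_mask"])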
import inspect
import re
from hashlib import sha256
from typing import Dict, List

from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa F401
from .text import text


def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()


# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})

_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
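
# Usage sketch (illustrative): _hash_python_lines drops comment-only lines before hashing,
# so builder sources that differ only in comment lines map to the same cache key:
#
#   assert _hash_python_lines(["# a comment", "x = 1"]) == _hash_python_lines(["x = 1"])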
import argparse
import os

import transformers

from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging


logging.set_verbosity_info()

logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}


def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help=(
            f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
            "download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--checkpoint_name",
        default=None,
        type=str,
        help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
    )
    parser.add_argument(
        "--force_download",
        action="store_true",
        help="Re-download checkpoints.",
    )
    args = parser.parse_args()

    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
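
# Usage sketch (illustrative; assumes the file keeps its upstream location inside the
# `transformers` package so it can be run with `-m`):
#
#   python -m transformers.convert_slow_tokenizers_checkpoints_to_fast \
#       --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased \
#       --dump_path ./fast_tokenizers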
import argparse

import torch

from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--rembert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RemBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
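
# Usage sketch (illustrative; the script filename and paths are placeholders, and the config
# must describe the same architecture as the TF checkpoint):
#
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./rembert/model.ckpt \
#       --rembert_config_file ./rembert/config.json \
#       --pytorch_dump_path ./rembert/pytorch_model.bin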
from binascii import hexlify
from hashlib import sha256
from os import urandom

# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 2048-bit
14: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 3072-bit
15: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 4096-bit
16: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
+ 'FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 6144-bit
17: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
+ '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
+ '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
+ 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
+ '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
+ 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
+ '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
+ '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
+ '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
+ 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
+ '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
+ 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
+ '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
+ '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
+ '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
+ 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
+ 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
+ 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
+ '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
+ 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
+ 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
+ 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
+ '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
+ '6DCC4024FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 8192-bit
18: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
+ 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
+ '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
+ 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
+ '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
+ 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
+ '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
+ '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
+ 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
+ '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
+ '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
+ '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
+ '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
+ '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
+ '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
+ '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
+ '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
+ 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
+ '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
+ '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
+ '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
}
class DiffieHellman:
    """
    Diffie-Hellman key exchange over one of the RFC 3526 MODP groups above.
    """

    # Current minimum recommendation is a 2048-bit group (group 14)
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key <= prime - 2
            and pow(remote_public_key, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
    import doctest

    doctest.testmod()
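
# Usage sketch (illustrative): a complete exchange; both sides must derive the same digest.
#
#   alice = DiffieHellman(group=14)
#   bob = DiffieHellman(group=14)
#
#   alice_secret = alice.generate_shared_key(bob.generate_public_key())
#   bob_secret = bob.generate_shared_key(alice.generate_public_key())
#   assert alice_secret == bob_secret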
# This file mirrors transformers' auto-generated `utils/dummy_flax_objects.py`: each class below
# is a stub that raises a clear error when Flax is not installed. The original class names were
# stripped from this dump, so the FlaxDummyObject* names are placeholders, not real API names.
from ..utils import DummyObject, requires_backends


class FlaxDummyObject1(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject2(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject3(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject4(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject5(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject6(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject7(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject8(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject9(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject10(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject11(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject12(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummyObject13(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
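
# Usage sketch (illustrative): without Flax installed, touching any of these stubs raises
# ImportError via requires_backends, before any model download is attempted:
#
#   model = FlaxDummyObject1()                     # ImportError pointing at the missing backend
#   model = FlaxDummyObject1.from_pretrained("x")  # same error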
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split


data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
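
# Usage sketch (illustrative): score the classifier on the held-out split created above.
#
#   correct = sum(
#       classifier(X_train, y_train, classes, point) == classes[target]
#       for point, target in zip(X_test, y_test)
#   )
#   print(f"accuracy: {correct / len(X_test):.2f}")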
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")
if __name__ == "__main__":
main()
| 684 | 1 |
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_basic_tokenizer_splits_on_punctuation(self):
        tokenizer = BasicTokenizer()
        text = "a\n'll !!to?'d of, can't."
        expected = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
        self.assertListEqual(tokenizer.tokenize(text), expected)
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
| 684 |
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    # Build the tower base^base^... of the given height, keeping only the
    # last `digits` digits at each step.
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
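

# Cross-check sketch (added, not part of the original solution): Python's built-in
# three-argument pow() does modular exponentiation natively, so the recursive
# _modexpt helper above can be validated against this variant.
def solution_with_builtin_pow(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    result = base
    for _ in range(1, height):
        result = pow(base, result, 10**digits)
    return result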
if __name__ == "__main__":
print(f"""{solution() = }""")
| 684 | 1 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp

    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted

mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution "
    "is :"
)
print(z)
| 684 |
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    faces_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(faces_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability
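

# Tiny sanity check (added illustration, not in the original file): with a single
# 6-sided die there is exactly one way to roll each total from 1 to 6.
def _single_die_check() -> None:
    frequencies = total_frequency_distribution(sides_number=6, dice_number=1)
    assert frequencies[0] == 0
    assert frequencies[1:] == [1, 1, 1, 1, 1, 1]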
if __name__ == "__main__":
print(f"""{solution() = }""")
| 684 | 1 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str) -> None:
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        # Clear the event loop, I/O thread and lock that fsspec caches for async filesystems.
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
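

# Usage sketch (added; the URI below is a made-up example, not from the original
# module): extract_path_from_uri strips the protocol prefix from remote dataset
# paths and leaves plain local paths untouched.
def _demo_extract_path() -> None:
    assert extract_path_from_uri("s3://my-bucket/datasets/squad") == "my-bucket/datasets/squad"
    assert extract_path_from_uri("relative/local/path") == "relative/local/path"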
| 684 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 684 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPixaPixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=8,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2

        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)

        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]

        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in slice]))

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_latents_input(self):
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPixaPixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]

        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")

        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()

        out_latents_inputs = pipe(**inputs)[0]

        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")
@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
        )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default(self):
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9
    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))

        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        output = pipe(**inputs)
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
| 684 |
import os
def solution():
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right: range(17) keeps j + 3 inside the 20x20 grid
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
| 684 | 1 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
'artists_file': 'artists.json',
'lyrics_file': 'lyrics.json',
'genres_file': 'genres.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'artists_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
},
'genres_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
},
'lyrics_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
},
}
PRETRAINED_LYRIC_TOKENS_SIZES = {
'jukebox': 5_1_2,
}
class JukeboxTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        artists_file,
        genres_file,
        lyrics_file,
        version=["v3", "v2", "v2"],
        max_n_lyric_tokens=512,
        n_genres=5,
        unk_token="<|endoftext|>",
        **kwargs,
    ):
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            unk_token=unk_token,
            n_genres=n_genres,
            version=version,
            max_n_lyric_tokens=max_n_lyric_tokens,
            **kwargs,
        )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres

        with open(artists_file, encoding="utf-8") as vocab_handle:
            self.artists_encoder = json.load(vocab_handle)

        with open(genres_file, encoding="utf-8") as vocab_handle:
            self.genres_encoder = json.load(vocab_handle)

        with open(lyrics_file, encoding="utf-8") as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle)

        oov = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 79:
            oov = oov.replace(r"\-'", r"\-+'")

        self.out_of_vocab = regex.compile(oov)
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
    @property
    def vocab_size(self):
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)
    def get_vocab(self):
        return dict(self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder)
    def _convert_token_to_id(self, list_artists, list_genres, list_lyrics):
        artists_id = [self.artists_encoder.get(artist, 0) for artist in list_artists]
        for genres in range(len(list_genres)):
            list_genres[genres] = [self.genres_encoder.get(genre, 0) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))

        lyric_ids = [[self.lyrics_encoder.get(character, 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids

    def _tokenize(self, lyrics):
        return list(lyrics)

    def tokenize(self, artist, genre, lyrics, **kwargs):
        artist, genre, lyrics = self.prepare_for_tokenization(artist, genre, lyrics)
        lyrics = self._tokenize(lyrics)
        return artist, genre, lyrics
    def prepare_for_tokenization(
        self, artists: str, genres: str, lyrics: str, is_split_into_words: bool = False
    ) -> Tuple[str, str, str, Dict[str, Any]]:
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx]) + ".v2"
                genres[idx] = [
                    self._normalize(genre) + ".v2" for genre in genres[idx].split("_")
                ]  # split is for the full dictionary with combined genres

        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+")
            vocab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))}
            self.vocab["<unk>"] = 0
            self.n_vocab = len(vocab) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ""
        else:
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+")

        lyrics = self._run_strip_accents(lyrics)
        lyrics = lyrics.replace("\\", "\n")
        lyrics = self.out_of_vocab.sub("", lyrics), [], []

        return artists, genres, lyrics
    def _run_strip_accents(self, text):
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _normalize(self, text: str) -> str:
        accepted = (
            [chr(i) for i in range(ord("a"), ord("z") + 1)]
            + [chr(i) for i in range(ord("A"), ord("Z") + 1)]
            + [chr(i) for i in range(ord("0"), ord("9") + 1)]
            + ["."]
        )
        accepted = frozenset(accepted)
        pattern = re.compile(r"_+")
        text = "".join([c if c in accepted else "_" for c in text.lower()])
        text = pattern.sub("_", text).strip("_")
        return text

    def convert_lyric_tokens_to_string(self, lyrics: List[str]) -> str:
        return " ".join(lyrics)
    def convert_to_tensors(self, inputs, tensor_type=None, prepend_batch_axis=False):
        # Convert to TensorType
        if not isinstance(tensor_type, TensorType):
            tensor_type = TensorType(tensor_type)

        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed."
                )
            import tensorflow as tf

            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
            import torch

            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.")
            import jax.numpy as jnp  # noqa: F811

            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy

        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]

            if not is_tensor(inputs):
                inputs = as_tensor(inputs)
        except:  # noqa E722
            raise ValueError(
                "Unable to create tensor, you should probably activate truncation and/or padding "
                "with 'padding=True' 'truncation=True' to have batched tensors with the same length."
            )

        return inputs
def __call__( self , A , A , A="" , A="pt" ) -> BatchEncoding:
snake_case : List[str] = [0, 0, 0]
snake_case : List[str] = [artist] * len(self.version )
snake_case : List[Any] = [genres] * len(self.version )
snake_case , snake_case , snake_case : Optional[int] = self.tokenize(A , A , A )
snake_case , snake_case , snake_case : int = self._convert_token_to_id(A , A , A )
snake_case : Any = [-INFINITY] * len(full_tokens[-1] )
snake_case : int = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=A )
for i in range(len(self.version ) )
]
return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks} )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return

        artists_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"]
        )
        with open(artists_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.artists_encoder, ensure_ascii=False))

        genres_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"]
        )
        with open(genres_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.genres_encoder, ensure_ascii=False))

        lyrics_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"]
        )
        with open(lyrics_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False))

        return (artists_file, genres_file, lyrics_file)

    def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
        artist = self.artists_decoder.get(artists_index)
        genres = [self.genres_decoder.get(genre) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
        return artist, genres, lyrics
| 684 |
def cocktail_shaker_sort(unsorted: list) -> list:
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted
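

# Quick self-check (added sketch, not in the original file): the bidirectional passes
# should produce the same ordering as Python's built-in sort.
def _demo_cocktail_shaker() -> None:
    sample = [4, 5, 2, 1, 2]
    assert cocktail_shaker_sort(list(sample)) == sorted(sample)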
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase : Any = input('Enter numbers separated by a comma:\n').strip()
lowerCamelCase : Optional[int] = [int(item) for item in user_input.split(',')]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 684 | 1 |
from __future__ import annotations
class IIRFilter:
    """N-order IIR filter, operating on float samples."""

    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            error_msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(error_msg)

        if len(b_coeffs) != self.order + 1:
            error_msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(error_msg)

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        # Shift the history buffers and store the newest sample/output.
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]

        self.input_history[0] = sample
        self.output_history[0] = result

        return result
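

# Usage sketch (added for illustration; the coefficient values are arbitrary
# placeholders, not taken from the original file): a 2nd-order filter takes
# k + 1 = 3 coefficients per side and processes samples one at a time.
def _demo_iir_filter() -> list[float]:
    filt = IIRFilter(2)
    filt.set_coefficients([1.0, -1.1, 0.3], [0.1, 0.2, 0.1])
    return [filt.process(s) for s in (1.0, 0.0, 0.0, 0.0)]  # impulse response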
| 684 |
| 684 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'caidas/swin2sr-classicalsr-x2-64': (
'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
),
}
class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
| 684 |
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    # Slide the pattern over the text and compare character by character.
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
| 684 | 1 |
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )
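

# Worked check (added; not in the original file): for 2023 the intermediate terms are
# days_to_add = 15 and days_from_phm_to_sunday = 3, giving March 22 + 18 days, i.e.
# Sunday, April 9, 2023, the actual Easter date that year.
def _check_easter_2023() -> None:
    assert gauss_easter(2023) == datetime(2023, 4, 9)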
if __name__ == "__main__":
for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
lowerCamelCase : List[str] = 'will be' if year > datetime.now().year else 'was'
print(f"""Easter in {year} {tense} {gauss_easter(year)}""")
| 684 |
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    # Implement tanh via its rational exponential form: (2 / (1 + e^(-2x))) - 1.
    return (2 / (1 + np.exp(-2 * vector))) - 1
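

# Numeric cross-check (added sketch, not in the original file): the rational form
# above is algebraically identical to np.tanh, so the two agree to float precision.
def _check_against_numpy() -> None:
    sample = np.array([-2.0, -1.0, 0.0, 1.0, 2.0])
    assert np.allclose(tangent_hyperbolic(sample), np.tanh(sample))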
if __name__ == "__main__":
import doctest
doctest.testmod()
| 684 | 1 |
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """masked_bert"""
def __init__( self , A=3_0_5_2_2 , A=7_6_8 , A=1_2 , A=1_2 , A=3_0_7_2 , A="gelu" , A=0.1 , A=0.1 , A=5_1_2 , A=2 , A=0.02 , A=1e-1_2 , A=0 , A="topK" , A="constant" , A=0.0 , **A , ) -> Union[str, Any]:
super().__init__(pad_token_id=A , **A )
snake_case : Union[str, Any] = vocab_size
snake_case : int = hidden_size
snake_case : Optional[int] = num_hidden_layers
snake_case : Optional[int] = num_attention_heads
snake_case : Union[str, Any] = hidden_act
snake_case : Union[str, Any] = intermediate_size
snake_case : str = hidden_dropout_prob
snake_case : Any = attention_probs_dropout_prob
snake_case : List[str] = max_position_embeddings
snake_case : Optional[Any] = type_vocab_size
snake_case : Tuple = initializer_range
snake_case : List[Any] = layer_norm_eps
snake_case : Optional[Any] = pruning_method
snake_case : Any = mask_init
snake_case : List[Any] = mask_scale
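# Usage sketch (added; hypothetical values):
#   config = MaskedBertConfig(num_hidden_layers=6, pruning_method="topK")
#   config.hidden_size  # -> 768, the BERT-base default kept above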
| 684 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_mae'] = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vit_mae'] = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
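    # Note (added): swapping this module object for a _LazyModule defers the heavy
    # torch/TF imports until an attribute is first touched, e.g. (assumption: run
    # from inside the transformers package):
    #   from transformers.models.vit_mae import ViTMAEConfig  # resolved lazily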
| 684 | 1 |
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation ) -> int:
    """Evaluate a postfix (reverse Polish) expression given as a list of string tokens."""
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b , a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b )
            elif token == "-":
                stack.append(a - b )
            elif token == "*":
                stack.append(a * b )
            else:
                # integer division truncated toward zero, unlike Python's floor //
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1 )
                else:
                    stack.append(a // b )
        else:
            stack.append(int(token ) )

    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
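    # Added demonstrations (division truncates toward zero, see the operator branch above):
    assert evaluate_postfix(["2", "1", "+", "3", "*"]) == 9   # (2 + 1) * 3
    assert evaluate_postfix(["4", "13", "5", "/", "+"]) == 6  # 4 + 13 / 5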
| 684 |
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 684 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


def pil_loader(path ) -> Image.Image:
    with open(path ,"""rb""" ) as f:
        im = Image.open(f )
        return im.convert("""RGB""" )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default=None , metadata={
            """help""": """Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."""
        } , )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
    train_dir: Optional[str] = field(default=None , metadata={"""help""": """A folder containing the training data."""} )
    validation_dir: Optional[str] = field(default=None , metadata={"""help""": """A folder containing the validation data."""} )
    train_val_split: Optional[float] = field(
        default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of training examples to this """
                """value if set."""
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of evaluation examples to this """
                """value if set."""
            )
        } , )

    def __post_init__( self ) -> None:
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                """You must specify either a dataset name from the hub or a train and/or validation directory.""" )


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going to fine-tune from."""

    model_name_or_path: str = field(
        default="""google/vit-base-patch16-224-in21k""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , )
    model_type: Optional[str] = field(
        default=None , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(MODEL_TYPES )} , )
    config_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    config_name: Optional[str] = field(
        default=None , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} ) if False else None  # noqa: E501 -- see cache_dir below
    cache_dir: Optional[str] = field(
        default=None , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
    model_revision: str = field(
        default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
    image_processor_name: Optional[str] = field(default=None , metadata={"""help""": """Name or path of preprocessor config."""} )
    use_auth_token: bool = field(
        default=False , metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        } , )
    ignore_mismatched_sizes: bool = field(
        default=False , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
def collate_fn(examples ) -> dict:
    pixel_values = torch.stack([example["""pixel_values"""] for example in examples] )
    labels = torch.tensor([example["""labels"""] for example in examples] )
    return {"pixel_values": pixel_values, "labels": labels}


def main() -> None:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("""run_image_classification""" ,model_args ,data_args )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,handlers=[logging.StreamHandler(sys.stdout )] ,)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + f""" distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}""" )
    logger.info(f"""Training/evaluation parameters {training_args}""" )
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name ,data_args.dataset_config_name ,cache_dir=model_args.cache_dir ,task="""image-classification""" ,use_auth_token=True if model_args.use_auth_token else None ,)
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["""train"""] = os.path.join(data_args.train_dir ,"""**""" )
        if data_args.validation_dir is not None:
            data_files["""validation"""] = os.path.join(data_args.validation_dir ,"""**""" )
        dataset = load_dataset(
            """imagefolder""" ,data_files=data_files ,cache_dir=model_args.cache_dir ,task="""image-classification""" ,)
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if """validation""" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split ,float ) and data_args.train_val_split > 0.0:
        split = dataset["""train"""].train_test_split(data_args.train_val_split )
        dataset["""train"""] = split["""train"""]
        dataset["""validation"""] = split["""test"""]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["""train"""].features["""labels"""].names
    label2id , id2label = {}, {}
    for i, label in enumerate(labels ):
        label2id[label] = str(i )
        id2label[str(i )] = label
    # Load the accuracy metric from the datasets package
    metric = evaluate.load("""accuracy""" )
    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p ):
        return metric.compute(predictions=np.argmax(p.predictions ,axis=1 ) ,references=p.label_ids )

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path ,num_labels=len(labels ) ,label2id=label2id ,id2label=id2label ,finetuning_task="""image-classification""" ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path ,from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) ,config=config ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,ignore_mismatched_sizes=model_args.ignore_mismatched_sizes ,)
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
snake_case : List[Any] = image_processor.size["""shortest_edge"""]
else:
snake_case : Dict = (image_processor.size["""height"""], image_processor.size["""width"""])
snake_case : List[str] = Normalize(mean=image_processor.image_mean ,std=image_processor.image_std )
snake_case : Dict = Compose(
[
RandomResizedCrop(lowercase ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
snake_case : Optional[Any] = Compose(
[
Resize(lowercase ),
CenterCrop(lowercase ),
ToTensor(),
normalize,
] )
def train_transforms(lowercase ):
snake_case : Union[str, Any] = [
_train_transforms(pil_img.convert("""RGB""" ) ) for pil_img in example_batch["""image"""]
]
return example_batch
def val_transforms(lowercase ):
snake_case : List[Any] = [_val_transforms(pil_img.convert("""RGB""" ) ) for pil_img in example_batch["""image"""]]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError("""--do_train requires a train dataset""" )
        if data_args.max_train_samples is not None:
            dataset["""train"""] = (
                dataset["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms )
    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("""--do_eval requires a validation dataset""" )
        if data_args.max_eval_samples is not None:
            dataset["""validation"""] = (
                dataset["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms )
    # Initialize our trainer
    trainer = Trainer(
        model=model ,args=training_args ,train_dataset=dataset["""train"""] if training_args.do_train else None ,eval_dataset=dataset["""validation"""] if training_args.do_eval else None ,compute_metrics=compute_metrics ,tokenizer=image_processor ,data_collator=collate_fn ,)
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()
        trainer.log_metrics("""train""" ,train_result.metrics )
        trainer.save_metrics("""train""" ,train_result.metrics )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("""eval""" ,metrics )
        trainer.save_metrics("""eval""" ,metrics )
# Write model card and (optionally) push to hub
    kwargs = {
        """finetuned_from""": model_args.model_name_or_path,
        """tasks""": """image-classification""",
        """dataset""": data_args.dataset_name,
        """tags""": ["""image-classification""", """vision"""],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
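# Example invocation (added; hypothetical dataset and flags that map onto the dataclasses above):
#   python run_image_classification.py --dataset_name beans --output_dir ./beans_outputs \
#       --do_train --do_eval --learning_rate 2e-5 --per_device_train_batch_size 8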
| 684 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spm_char.model'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model',
'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model',
'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'microsoft/speecht5_asr': 1024,
    'microsoft/speecht5_tts': 1024,
    'microsoft/speecht5_vc': 1024,
}
class SpeechT5Tokenizer(PreTrainedTokenizer ):
    """Construct a SpeechT5 tokenizer backed by a character-level SentencePiece model."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]

    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , sp_model_kwargs = None , **kwargs , ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab( self ) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ) -> Dict[str, Any]:
        state = self.__dict__.copy()
        # the SentencePiece processor is not picklable; it is re-created in __setstate__
        state["""sp_model"""] = None
        return state

    def __setstate__( self , d ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def _tokenize( self , text ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )

    def _convert_token_to_id( self , token ) -> int:
        return self.sp_model.piece_to_id(token )

    def _convert_id_to_token( self , index ) -> str:
        token = self.sp_model.IdToPiece(index )
        return token

    def convert_tokens_to_string( self , tokens ) -> str:
        current_sub_tokens = []
        out_string = """"""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0 )) + suffix_ones
        return ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones

    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
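# Usage sketch (added; checkpoint id taken from the pretrained map above):
#   tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
#   tokenizer("Hello world").input_ids  # character-level ids ending with the </s> id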
| 684 | 1 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file() -> str:
    parser = argparse.ArgumentParser()
    parser.add_argument("""-f""" )
    args = parser.parse_args()
    return args.f


def get_results(output_dir ,split="eval" ) -> dict:
    path = os.path.join(output_dir ,f"""{split}_results.json""" )
    if os.path.exists(path ):
        with open(path ,"""r""" ) as f:
            return json.load(f )
    raise ValueError(f"""can't find {path}""" )


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class ExamplesTests(TestCasePlus ):
    """End-to-end smoke tests for the Flax example scripts."""

    def test_run_glue( self ) -> None:
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(A , """argv""" , A ):
run_flax_glue.main()
snake_case : Union[str, Any] = get_results(A )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
@slow
    def test_run_clm( self ) -> None:
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys , """argv""" , testargs ):
            run_clm_flax.main()
            result = get_results(tmp_dir )
            self.assertLess(result["""eval_perplexity"""] , 100 )
@slow
    def test_run_summarization( self ) -> None:
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
        with patch.object(sys , """argv""" , testargs ):
            run_summarization_flax.main()
            result = get_results(tmp_dir , split="""test""" )
            self.assertGreaterEqual(result["""test_rouge1"""] , 10 )
            self.assertGreaterEqual(result["""test_rouge2"""] , 2 )
            self.assertGreaterEqual(result["""test_rougeL"""] , 7 )
            self.assertGreaterEqual(result["""test_rougeLsum"""] , 7 )
@slow
    def test_run_mlm( self ) -> None:
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
        with patch.object(sys , """argv""" , testargs ):
            run_mlm_flax.main()
            result = get_results(tmp_dir )
            self.assertLess(result["""eval_perplexity"""] , 42 )
@slow
    def test_run_t5_mlm( self ) -> None:
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys , """argv""" , testargs ):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir )
            self.assertGreaterEqual(result["""eval_accuracy"""] , 0.42 )
@slow
    def test_run_ner( self ) -> None:
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
        with patch.object(sys , """argv""" , testargs ):
            run_flax_ner.main()
            result = get_results(tmp_dir )
            self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
            self.assertGreaterEqual(result["""eval_f1"""] , 0.3 )
@slow
    def test_run_qa( self ) -> None:
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
        with patch.object(sys , """argv""" , testargs ):
            run_qa.main()
            result = get_results(tmp_dir )
            self.assertGreaterEqual(result["""eval_f1"""] , 30 )
            self.assertGreaterEqual(result["""eval_exact"""] , 30 )
| 684 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}
class GPTNeoXJapaneseConfig(PretrainedConfig ):
    """Configuration for the Japanese GPT-NeoX model released by ABEJA."""

    model_type = """gpt_neox_japanese"""

    def __init__( self , vocab_size=32000 , hidden_size=2560 , num_hidden_layers=32 , num_attention_heads=32 , intermediate_multiple_size=4 , hidden_act="gelu" , rotary_pct=1.00 , rotary_emb_base=10000 , max_position_embeddings=2048 , initializer_range=0.02 , layer_norm_eps=1e-5 , use_cache=True , bos_token_id=31996 , eos_token_id=31999 , attention_dropout=0.1 , hidden_dropout=0.0 , **kwargs , ) -> None:
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
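# Usage sketch (added; hypothetical values):
#   config = GPTNeoXJapaneseConfig(num_hidden_layers=16)
#   config.intermediate_multiple_size  # -> 4: the FFN width is this multiple of hidden_size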
| 684 | 1 |
def hex_to_bin(hex_num: str ) -> int:
    """Convert a hexadecimal string into its binary digits, packed into an int."""
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("""No value was passed to the function""" )
    is_negative = hex_num[0] == """-"""
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num ,16 )
    except ValueError:
        raise ValueError("""Invalid value was passed to the function""" )
    bin_str = """"""
    while int_num > 0:
        bin_str = str(int_num % 2 ) + bin_str
        int_num >>= 1
    if not bin_str:  # the input was zero; avoid int("") below
        bin_str = """0"""
    return int(("""-""" + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
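    # Added demonstrations: the binary digits come back packed into an int.
    assert hex_to_bin("AC") == 10101100
    assert hex_to_bin("-fFfF") == -1111111111111111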
| 684 | 1 |
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock('.lock') as lock:
        nltk.download('punkt', quiet=True)


def add_newline_to_end_of_each_sentence(x: str ) -> str:
    x = re.sub("""<n>""" ,"""""" ,x )  # remove pegasus newline char and keep the substituted result
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x ) )
| 684 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
class SimpleImageProcessor(BaseImageProcessor ):  # class name is hypothetical; the original identifier was not recoverable
    """Resize, center-crop, rescale and normalize images into model-ready pixel values."""

    model_input_names = ["""pixel_values"""]

    def __init__( self , do_resize = True , size = None , resample = PIL.Image.BICUBIC , do_center_crop = True , crop_size = None , rescale_factor = 1 / 255 , do_rescale = True , do_normalize = True , image_mean = None , image_std = None , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {"""height""": 256, """width""": 256}
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
        crop_size = get_size_dict(crop_size , param_name="""crop_size""" )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image , size , resample = PIL.Image.BICUBIC , data_format = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
        return resize(
            image , size=(size["""height"""], size["""width"""]) , resample=resample , data_format=data_format , **kwargs )

    def center_crop( self , image , size , data_format = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
        return center_crop(image , size=(size["""height"""], size["""width"""]) , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format = None , **kwargs , ) -> np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def normalize( self , image , mean , std , data_format = None , **kwargs , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize = None , size = None , resample=None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="""crop_size""" )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
            raise ValueError("""Size and resample must be specified if do_resize is True.""" )
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 684 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class __lowercase (unittest.TestCase ):
"""simple docstring"""
    @slow
    def test_small_integration_test( self ) -> None:
        model = TFAutoModelForSeq2SeqLM.from_pretrained("""google/mt5-small""" )
        tokenizer = AutoTokenizer.from_pretrained("""google/mt5-small""" )
        input_ids = tokenizer("""Hello there""" , return_tensors="""tf""" ).input_ids
        labels = tokenizer("""Hi I am""" , return_tensors="""tf""" ).input_ids
        loss = model(input_ids , labels=labels ).loss
        mtf_score = -tf.math.reduce_mean(loss ).numpy()
        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
| 684 |
import inspect
import unittest
class DependencyTester(unittest.TestCase ):
    """Check that diffusers imports cleanly and its dependency table covers every dummy backend."""

    def test_diffusers_import( self ) -> None:
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False
    def test_backend_registration( self ) -> None:
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
snake_case : Tuple = """k-diffusion"""
elif backend == "invisible_watermark":
snake_case : Optional[int] = """invisible-watermark"""
assert backend in deps, f"""{backend} is not in the deps table!"""
| 684 | 1 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str ) -> str:
    """Return the sorted letters of ``word``; anagrams share the same signature."""
    return "".join(sorted(word ) )


def anagram(my_word: str ) -> list[str]:
    """Return every dictionary word that is an anagram of ``my_word``."""
    return word_by_signature[signature(my_word )]


data = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
word_list = sorted({word.strip().lower() for word in data.splitlines()})
word_by_signature = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
lowerCamelCase : Dict = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams))
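    # Signature trick illustrated (added): 'listen' and 'silent' map to the same key.
    # signature('listen') == signature('silent') == 'eilnst'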
| 684 |
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
lowerCamelCase : Union[str, Any] = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
lowerCamelCase : List[Any] = 'main'
# Default branch name
lowerCamelCase : Tuple = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'
# One particular commit (not the top of `main`)
lowerCamelCase : List[Any] = 'aaaaaaa'
# This commit does not exist, so we should 404.
lowerCamelCase : List[Any] = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684'
# Sha-1 of config.json on the top of `main`, for checking purposes
lowerCamelCase : int = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'
@contextlib.contextmanager
def context_en():
    print("""Welcome!""" )
    yield
    print("""Bye!""" )


@contextlib.contextmanager
def context_fr():
    print("""Bonjour!""" )
    yield
    print("""Au revoir!""" )
class __lowercase (unittest.TestCase ):
"""simple docstring"""
    def test_module_spec( self ) -> None:
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec("""transformers""" ) is not None
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> Optional[Any]:
with ContextManagers([] ):
print("""Transformers are awesome!""" )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> int:
with ContextManagers([context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> int:
with ContextManagers([context_fr(), context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" )
    @require_torch
    def test_find_labels_pt( self ) -> None:
        self.assertEqual(find_labels(BertForSequenceClassification ) , ["""labels"""] )
        self.assertEqual(find_labels(BertForPreTraining ) , ["""labels""", """next_sentence_label"""] )
        self.assertEqual(find_labels(BertForQuestionAnswering ) , ["""start_positions""", """end_positions"""] )

        # find_labels detects the framework through inheritance, whatever the class name
        class DummyModel(BertForSequenceClassification ):
            pass

        self.assertEqual(find_labels(DummyModel ) , ["""labels"""] )

    @require_tf
    def test_find_labels_tf( self ) -> None:
        self.assertEqual(find_labels(TFBertForSequenceClassification ) , ["""labels"""] )
        self.assertEqual(find_labels(TFBertForPreTraining ) , ["""labels""", """next_sentence_label"""] )
        self.assertEqual(find_labels(TFBertForQuestionAnswering ) , ["""start_positions""", """end_positions"""] )

        class DummyModel(TFBertForSequenceClassification ):
            pass

        self.assertEqual(find_labels(DummyModel ) , ["""labels"""] )

    @require_flax
    def test_find_labels_flax( self ) -> None:
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification ) , [] )
        self.assertEqual(find_labels(FlaxBertForPreTraining ) , [] )
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering ) , [] )

        class DummyModel(FlaxBertForSequenceClassification ):
            pass

        self.assertEqual(find_labels(DummyModel ) , [] )
| 684 | 1 |
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    """A grid cell with a link to the parent it was expanded from."""

    def __init__( self , pos_x , pos_y , goal_x , goal_y , parent ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    """Unidirectional breadth-first search over the global ``grid``."""

    def __init__( self , start , goal ) -> None:
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , None )
        self.node_queue = [self.start]
        self.reached = False
    def search( self ) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0 )
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node )
            successors = self.get_successors(current_node )
            for node in successors:
                self.node_queue.append(node )
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors( self , parent ) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent ) )
        return successors

    def retrace_path( self , node ) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    """Run two breadth-first searches, one from each end, until the frontiers meet."""

    def __init__( self , start , goal ) -> None:
        self.fwd_bfs = BreadthFirstSearch(start , goal )
        self.bwd_bfs = BreadthFirstSearch(goal , start )
        self.reached = False
    def search( self ) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0 )
            current_bwd_node = self.bwd_bfs.node_queue.pop(0 )
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node , current_bwd_node )
            # each search chases the other's current frontier node
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node ),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node ),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node )
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path( self , fwd_node , bwd_node ) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node )
        bwd_path = self.bwd_bfs.retrace_path(bwd_node )
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    bfs_path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print('Unidirectional BFS computation time : ', bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_bfs_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print('Bidirectional BFS computation time : ', bd_bfs_time)
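    # Added illustration: both variants return a path from init to goal
    # (or [init] when the goal is unreachable).
    print('Unidirectional path :', bfs_path)
    print('Bidirectional path :', bd_bfs_path)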
| 684 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class ASTConfig(PretrainedConfig ):
    """Configuration for the Audio Spectrogram Transformer (AST) model."""

    model_type = """audio-spectrogram-transformer"""

    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , patch_size=16 , qkv_bias=True , frequency_stride=10 , time_stride=10 , max_length=1024 , num_mel_bins=128 , **kwargs , ) -> None:
        super().__init__(**kwargs )

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
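# Usage sketch (added; hypothetical values): AST splits a (num_mel_bins x max_length)
# spectrogram into patch_size x patch_size patches using the strides configured above.
#   config = ASTConfig(frequency_stride=10, time_stride=10)
#   config.patch_size  # -> 16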
| 684 | 1 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def get_resize_output_image_size(input_image ,output_size ,keep_aspect_ratio ,multiple ) -> Tuple[int, int]:
    def constraint_to_multiple_of(val ,multiple ,min_val=0 ,max_val=None ):
        x = round(val / multiple ) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple ) * multiple
        if x < min_val:
            x = math.ceil(val / multiple ) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size ,int ) else output_size
    input_height , input_width = get_image_size(input_image )
    output_height , output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width ) < abs(1 - scale_height ):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height ,multiple=multiple )
    new_width = constraint_to_multiple_of(scale_width * input_width ,multiple=multiple )
    return (new_height, new_width)
class DPTImageProcessor(BaseImageProcessor ):
    """DPT-style image processor: resize (optionally keeping aspect ratio, snapped to a multiple), rescale, normalize."""

    model_input_names = ["""pixel_values"""]

    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , keep_aspect_ratio = False , ensure_multiple_of = 1 , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , image_mean = None , image_std = None , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {"""height""": 384, """width""": 384}
        size = get_size_dict(size )
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image , size , keep_aspect_ratio = False , ensure_multiple_of = 1 , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
        output_size = get_resize_output_image_size(
            image , output_size=(size["""height"""], size["""width"""]) , keep_aspect_ratio=keep_aspect_ratio , multiple=ensure_multiple_of , )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )

    def rescale( self , image , scale , data_format = None , **kwargs , ) -> np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def normalize( self , image , mean , std , data_format = None , **kwargs , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize = None , size = None , keep_aspect_ratio = None , ensure_multiple_of = None , resample = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
            raise ValueError("""Size and resample must be specified if do_resize is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
    def post_process_semantic_segmentation( self , outputs , target_sizes = None ) -> list:
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    """Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
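# Usage sketch (added; hypothetical image variable): sizes are snapped to ensure_multiple_of.
#   processor = DPTImageProcessor(size={"height": 384, "width": 384}, ensure_multiple_of=32)
#   inputs = processor(images=image, return_tensors="pt")  # -> {"pixel_values": tensor(...)}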
| 684 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)
class ReturnType(enum.Enum ):
    TENSORS = 0
    TEXT = 1


@add_end_docstrings(PIPELINE_INIT_ARGS )
class Text2TextGenerationPipeline(Pipeline ):
    """Pipeline for text-to-text generation using seq2seq models."""

    return_name = """generated"""
    def __init__( self , *args , **kwargs ) -> None:
        super().__init__(*args , **kwargs )
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == """tf"""
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
    def _sanitize_parameters( self , return_tensors=None , return_type=None , clean_up_tokenization_spaces=None , truncation=None , stop_sequence=None , **generate_kwargs , ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["""truncation"""] = truncation
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["""return_type"""] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["""clean_up_tokenization_spaces"""] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence , add_special_tokens=False )
            if len(stop_sequence_ids ) > 1:
                warnings.warn(
                    """Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
                    """ the stop sequence will be used as the stop sequence string in the interim.""" )
            generate_kwargs["""eos_token_id"""] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def check_inputs( self , input_length: int , min_length: int , max_length: int ) -> bool:
        return True
    def _parse_and_tokenize( self , *args , truncation ):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else """"""
        if isinstance(args[0] , list ):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0] , str ):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f""" `args[0]`: {args[0]} have the wrong format. They should be either of type `str` or type `list`""" )
        inputs = self.tokenizer(*args , padding=padding , truncation=truncation , return_tensors=self.framework )
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs
def __call__( self , *A , **A ) -> Union[str, Any]:
snake_case : Tuple = super().__call__(*A , **A )
if (
isinstance(args[0] , A )
and all(isinstance(A , A ) for el in args[0] )
and all(len(A ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def UpperCAmelCase ( self , A , A=TruncationStrategy.DO_NOT_TRUNCATE , **A ) -> str:
snake_case : Optional[Any] = self._parse_and_tokenize(A , truncation=A , **A )
return inputs
def UpperCAmelCase ( self , A , **A ) -> Tuple:
if self.framework == "pt":
snake_case , snake_case : List[str] = model_inputs["""input_ids"""].shape
elif self.framework == "tf":
snake_case , snake_case : Optional[Any] = tf.shape(model_inputs["""input_ids"""] ).numpy()
snake_case : Dict = generate_kwargs.get("""min_length""" , self.model.config.min_length )
snake_case : str = generate_kwargs.get("""max_length""" , self.model.config.max_length )
self.check_inputs(A , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""] )
snake_case : List[str] = self.model.generate(**A , **A )
snake_case : Dict = output_ids.shape[0]
if self.framework == "pt":
snake_case : List[Any] = output_ids.reshape(A , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
snake_case : Any = tf.reshape(A , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def UpperCAmelCase ( self , A , A=ReturnType.TEXT , A=False ) -> Union[str, Any]:
snake_case : Tuple = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
snake_case : Dict = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
snake_case : int = {
f"""{self.return_name}_text""": self.tokenizer.decode(
A , skip_special_tokens=A , clean_up_tokenization_spaces=A , )
}
records.append(A )
return records
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """summary"""
def __call__( self , *A , **A ) -> str:
return super().__call__(*A , **A )
def UpperCAmelCase ( self , A , A , A ) -> bool:
if max_length < min_length:
logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
"""a summarization task, where outputs shorter than the input are typically wanted, you might """
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """translation"""
def UpperCAmelCase ( self , A , A , A ) -> Union[str, Any]:
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
"""increasing your max_length manually, e.g. translator('...', max_length=400)""" )
return True
def UpperCAmelCase ( self , *A , A=TruncationStrategy.DO_NOT_TRUNCATE , A=None , A=None ) -> Optional[int]:
if getattr(self.tokenizer , """_build_translation_inputs""" , A ):
return self.tokenizer._build_translation_inputs(
*A , return_tensors=self.framework , truncation=A , src_lang=A , tgt_lang=A )
else:
return super()._parse_and_tokenize(*A , truncation=A )
def UpperCAmelCase ( self , A=None , A=None , **A ) -> Union[str, Any]:
snake_case , snake_case , snake_case : str = super()._sanitize_parameters(**A )
if src_lang is not None:
snake_case : Tuple = src_lang
if tgt_lang is not None:
snake_case : str = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
snake_case : Union[str, Any] = kwargs.get("""task""" , self.task )
snake_case : Any = task.split("""_""" )
if task and len(A ) == 4:
# translation, XX, to YY
snake_case : Optional[Any] = items[1]
snake_case : Dict = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self , *A , **A ) -> str:
return super().__call__(*A , **A )
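# Illustrative usage (added; not part of the original module). These pipeline
# classes are normally constructed through `transformers.pipeline`, which picks
# the matching subclass from the task name:
#
#   from transformers import pipeline
#   summarizer = pipeline("summarization")         # summarization pipeline
#   translator = pipeline("translation_en_to_fr")  # translation pipeline; src/tgt langs parsed from the task name
#   summarizer("Some long article text ...", max_length=60, min_length=10)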
| 684 | 1 |
def SCREAMING_SNAKE_CASE__ ( ) -> int:
return 1
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
return 0 if x < 0 else five_pence(x - 5 ) + two_pence(lowercase )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(lowercase )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(lowercase )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(lowercase )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(lowercase )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
return 0 if x < 0 else two_pound(x - 200 ) + one_pound(lowercase )
def SCREAMING_SNAKE_CASE__ ( lowercase = 200 ) -> int:
return two_pound(lowercase )
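# Equivalent bottom-up sketch (added for illustration; not in the original).
# The mutual recursion above counts coin combinations for 200 pence; the classic
# single-table dynamic programme below gives the same count (73682) without deep
# call chains:
def solution_dp(pence: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    ways = [1] + [0] * pence  # ways[0] = 1: one way to make zero pence
    for coin in coins:
        for amount in range(coin, pence + 1):
            ways[amount] += ways[amount - coin]
    return ways[pence]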
if __name__ == "__main__":
print(solution(int(input().strip())))
| 684 |
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> str:
snake_case : int = []
for line in lines:
snake_case : Dict = re.sub(R"""#.*""" ,"""""" ,lowercase ) # remove comments
if line:
filtered_lines.append(lowercase )
snake_case : Optional[int] = """\n""".join(lowercase )
# Make a hash from all this code
snake_case : List[str] = full_str.encode("""utf-8""" )
    return sha256(lowercase ).hexdigest()
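# Worked example (added for illustration): whole-line comments and blank lines do
# not affect the hash, since both are stripped before hashing, e.g.
#   _hash_python_lines(["# header", "x = 1"]) == _hash_python_lines(["x = 1"])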
# get importable module names and hash for caching
lowerCamelCase : Any = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
lowerCamelCase : Optional[int] = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
lowerCamelCase : Tuple = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
lowerCamelCase : Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 684 | 1 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> str:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
    # as are Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and are handled
    # like all of the other languages.
if (
(cp >= 0X4_E00 and cp <= 0X9_FFF)
or (cp >= 0X3_400 and cp <= 0X4_DBF) #
or (cp >= 0X20_000 and cp <= 0X2A_6DF) #
or (cp >= 0X2A_700 and cp <= 0X2B_73F) #
or (cp >= 0X2B_740 and cp <= 0X2B_81F) #
or (cp >= 0X2B_820 and cp <= 0X2C_EAF) #
or (cp >= 0XF_900 and cp <= 0XF_AFF)
or (cp >= 0X2F_800 and cp <= 0X2F_A1F) #
): #
return True
return False
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
# word like '180' or '身高' or '神'
for char in word:
snake_case : str = ord(lowercase )
if not _is_chinese_char(lowercase ):
return 0
return 1
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Any:
snake_case : Any = set()
for token in tokens:
snake_case : int = len(lowercase ) > 1 and is_chinese(lowercase )
if chinese_word:
word_set.add(lowercase )
snake_case : str = list(lowercase )
return word_list
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> int:
if not chinese_word_set:
return bert_tokens
snake_case : Dict = max([len(lowercase ) for w in chinese_word_set] )
snake_case : Union[str, Any] = bert_tokens
snake_case , snake_case : Optional[int] = 0, len(lowercase )
while start < end:
snake_case : Optional[int] = True
if is_chinese(bert_word[start] ):
snake_case : str = min(end - start ,lowercase )
for i in range(lowercase ,1 ,-1 ):
snake_case : Dict = """""".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 ,start + i ):
snake_case : List[Any] = """##""" + bert_word[j]
snake_case : List[str] = start + i
snake_case : Optional[Any] = False
break
if single_word:
start += 1
return bert_word
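# Worked example (added for illustration): characters that continue an LTP-segmented
# Chinese word get the "##" continuation prefix so whole-word masking can treat the
# word as one unit, e.g.
#   add_sub_symbol(["北", "京", "欢", "迎", "你"], {"北京", "欢迎"})
#   -> ["北", "##京", "欢", "##迎", "你"]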
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> str:
snake_case : List[Any] = []
for i in range(0 ,len(lowercase ) ,100 ):
snake_case : Union[str, Any] = ltp_tokenizer.seg(lines[i : i + 100] )[0]
snake_case : Optional[Any] = [get_chinese_word(lowercase ) for r in res]
ltp_res.extend(lowercase )
assert len(lowercase ) == len(lowercase )
snake_case : int = []
for i in range(0 ,len(lowercase ) ,100 ):
snake_case : Dict = bert_tokenizer(lines[i : i + 100] ,add_special_tokens=lowercase ,truncation=lowercase ,max_length=512 )
bert_res.extend(res["""input_ids"""] )
assert len(lowercase ) == len(lowercase )
snake_case : Any = []
for input_ids, chinese_word in zip(lowercase ,lowercase ):
snake_case : Union[str, Any] = []
for id in input_ids:
snake_case : Dict = bert_tokenizer._convert_id_to_token(lowercase )
input_tokens.append(lowercase )
snake_case : int = add_sub_symbol(lowercase ,lowercase )
snake_case : List[Any] = []
        # We only save positions of Chinese subwords that start with "##", meaning they are part of a whole word.
for i, token in enumerate(lowercase ):
if token[:2] == "##":
snake_case : Dict = token[2:]
# save chinese tokens' pos
if len(lowercase ) == 1 and _is_chinese_char(ord(lowercase ) ):
ref_id.append(lowercase )
ref_ids.append(lowercase )
assert len(lowercase ) == len(lowercase )
return ref_ids
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Union[str, Any]:
    # For Chinese (Ro)BERT, the best results come from RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # To fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name ,"""r""" ,encoding="""utf-8""" ) as f:
snake_case : Tuple = f.readlines()
snake_case : Dict = [line.strip() for line in data if len(lowercase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
snake_case : List[Any] = LTP(args.ltp ) # faster in GPU device
snake_case : Any = BertTokenizer.from_pretrained(args.bert )
snake_case : int = prepare_ref(lowercase ,lowercase ,lowercase )
with open(args.save_path ,"""w""" ,encoding="""utf-8""" ) as f:
snake_case : str = [json.dumps(lowercase ) + """\n""" for ref in ref_ids]
f.writelines(lowercase )
if __name__ == "__main__":
lowerCamelCase : List[Any] = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path'
)
parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer')
parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res')
lowerCamelCase : List[Any] = parser.parse_args()
main(args)
| 684 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> Tuple:
# Initialise PyTorch model
snake_case : int = RemBertConfig.from_json_file(lowercase )
print("""Building PyTorch model from configuration: {}""".format(str(lowercase ) ) )
snake_case : Tuple = RemBertModel(lowercase )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(lowercase ,lowercase ,lowercase )
# Save pytorch-model
print("""Save PyTorch model to {}""".format(lowercase ) )
torch.save(model.state_dict() ,lowercase )
if __name__ == "__main__":
lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCamelCase : Dict = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
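# Example invocation (illustrative; paths are placeholders):
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/tf_checkpoint \
#       --rembert_config_file /path/to/rembert_config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin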
| 684 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase : int = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = '▁'
lowerCamelCase : str = {'vocab_file': 'sentencepiece.bpe.model'}
lowerCamelCase : Dict = {
'vocab_file': {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model',
}
}
lowerCamelCase : Optional[Any] = {
'facebook/xglm-564M': 2_0_4_8,
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = ["""input_ids""", """attention_mask"""]
def __init__( self , A , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A = None , **A , ) -> None:
snake_case : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
snake_case : Optional[int] = 7
snake_case : int = [f"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
snake_case : Tuple = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=A , eos_token=A , unk_token=A , sep_token=A , cls_token=A , pad_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
snake_case : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A ) )
snake_case : Tuple = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case : Union[str, Any] = 1
        # Mimic fairseq token-to-id alignment for the first 4 tokens
snake_case : Tuple = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
snake_case : Optional[Any] = len(self.sp_model )
snake_case : Any = {f"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(A )
snake_case : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
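        # Worked example (added for illustration): with fairseq_offset = 1, the piece at
        # SentencePiece id 3 maps to fairseq id 4, while ids 0-3 are the reserved
        # <s>/<pad>/</s>/<unk> slots resolved through fairseq_tokens_to_ids above.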
def __getstate__( self ) -> List[Any]:
snake_case : Tuple = self.__dict__.copy()
snake_case : Optional[int] = None
snake_case : str = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , A ) -> List[Any]:
snake_case : Any = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
snake_case : List[Any] = {}
snake_case : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def UpperCAmelCase ( self , A , A = None ) -> List[int]:
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
snake_case : Dict = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def UpperCAmelCase ( self , A , A = None , A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
if token_ids_a is None:
return [1] + ([0] * len(A ))
return [1] + ([0] * len(A )) + [1, 1] + ([0] * len(A ))
def UpperCAmelCase ( self , A , A = None ) -> List[int]:
snake_case : List[str] = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def UpperCAmelCase ( self ) -> Any:
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def UpperCAmelCase ( self ) -> Any:
snake_case : Dict = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase ( self , A ) -> List[str]:
return self.sp_model.encode(A , out_type=A )
def UpperCAmelCase ( self , A ) -> Optional[int]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case : Union[str, Any] = self.sp_model.PieceToId(A )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCAmelCase ( self , A ) -> Optional[int]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCAmelCase ( self , A ) -> Optional[Any]:
snake_case : Dict = """""".join(A ).replace(A , """ """ ).strip()
return out_string
def UpperCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : str = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , """wb""" ) as fi:
snake_case : int = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
| 684 |
from ..utils import DummyObject, requires_backends
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[str]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Any:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[int]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Union[str, Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Any:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Dict:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
| 684 | 1 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
lowerCamelCase : str = {
'gwf-440k': {
'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt',
'sample_rate': 4_8_0_0_0,
'sample_size': 6_5_5_3_6,
},
'jmann-small-190k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt',
'sample_rate': 4_8_0_0_0,
'sample_size': 6_5_5_3_6,
},
'jmann-large-580k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt',
'sample_rate': 4_8_0_0_0,
'sample_size': 1_3_1_0_7_2,
},
'maestro-uncond-150k': {
'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt',
'sample_rate': 1_6_0_0_0,
'sample_size': 6_5_5_3_6,
},
'unlocked-uncond-250k': {
'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt',
'sample_rate': 1_6_0_0_0,
'sample_size': 6_5_5_3_6,
},
'honk-140k': {
'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt',
'sample_rate': 1_6_0_0_0,
'sample_size': 6_5_5_3_6,
},
}
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> int:
    return torch.atan2(lowercase ,lowercase ) / math.pi * 2
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Dict:
snake_case : Tuple = torch.sin(t * math.pi / 2 ) ** 2
snake_case : Tuple = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(lowercase ,lowercase )
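# Note (added for illustration): alpha_sigma_to_t inverts the v-diffusion
# parameterisation alpha = cos(t * pi / 2), sigma = sin(t * pi / 2); since
# atan2(sigma, alpha) recovers t * pi / 2, dividing by pi and doubling maps the
# (alpha, sigma) pair back to a timestep t in [0, 1].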
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
pass
class __lowercase (nn.Module ):
"""simple docstring"""
def __init__( self , A ) -> Any:
super().__init__()
snake_case : Optional[int] = DiffusionAttnUnetaD(A , n_attn_layers=4 )
snake_case : Tuple = deepcopy(self.diffusion )
snake_case : List[Any] = torch.quasirandom.SobolEngine(1 , scramble=A )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Dict:
snake_case : str = MODELS_MAP[model_name]["""url"""]
os.system(f"""wget {url} ./""" )
return f"""./{model_name}.ckpt"""
lowerCamelCase : int = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
}
lowerCamelCase : Optional[Any] = {
'8': 'resnets.0',
'9': 'attentions.0',
'10': 'resnets.1',
'11': 'attentions.1',
'12': 'resnets.2',
'13': 'attentions.2',
}
lowerCamelCase : int = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
'8': 'resnets.3',
'9': 'attentions.3',
'10': 'resnets.4',
'11': 'attentions.4',
'12': 'resnets.5',
'13': 'attentions.5',
}
lowerCamelCase : Optional[Any] = {
'0': 'resnets.0',
'1': 'resnets.1',
'2': 'resnets.2',
'4': 'resnets.0',
'5': 'resnets.1',
'6': 'resnets.2',
}
lowerCamelCase : int = {
'skip': 'conv_skip',
'main.0': 'conv_1',
'main.1': 'group_norm_1',
'main.3': 'conv_2',
'main.4': 'group_norm_2',
}
lowerCamelCase : Dict = {
'norm': 'group_norm',
'qkv_proj': ['query', 'key', 'value'],
'out_proj': ['proj_attn'],
}
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Dict:
if name.startswith("""skip""" ):
return name.replace("""skip""" ,RES_CONV_MAP["""skip"""] )
# name has to be of format main.{digit}
if not name.startswith("""main.""" ):
raise ValueError(f"""ResConvBlock error with {name}""" )
return name.replace(name[:6] ,RES_CONV_MAP[name[:6]] )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> str:
for key, value in ATTN_MAP.items():
if name.startswith(lowercase ) and not isinstance(lowercase ,lowercase ):
return name.replace(lowercase ,lowercase )
elif name.startswith(lowercase ):
return [name.replace(lowercase ,lowercase ) for v in value]
raise ValueError(f"""Attn error with {name}""" )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase=13 ) -> Any:
snake_case : List[Any] = input_string
if string.split(""".""" )[0] == "timestep_embed":
return string.replace("""timestep_embed""" ,"""time_proj""" )
snake_case : Optional[Any] = 0
if string.startswith("""net.3.""" ):
depth += 1
snake_case : Union[str, Any] = string[6:]
elif string.startswith("""net.""" ):
snake_case : Tuple = string[4:]
while string.startswith("""main.7.""" ):
depth += 1
snake_case : List[Any] = string[7:]
if string.startswith("""main.""" ):
snake_case : Union[str, Any] = string[5:]
# mid block
if string[:2].isdigit():
snake_case : List[str] = string[:2]
snake_case : str = string[2:]
else:
snake_case : Optional[Any] = string[0]
snake_case : str = string[1:]
if depth == max_depth:
snake_case : str = MID_NUM_TO_LAYER[layer_num]
snake_case : Optional[Any] = """mid_block"""
elif depth > 0 and int(lowercase ) < 7:
snake_case : Optional[int] = DOWN_NUM_TO_LAYER[layer_num]
snake_case : List[Any] = f"""down_blocks.{depth}"""
elif depth > 0 and int(lowercase ) > 7:
snake_case : Optional[int] = UP_NUM_TO_LAYER[layer_num]
snake_case : Dict = f"""up_blocks.{max_depth - depth - 1}"""
elif depth == 0:
snake_case : Tuple = DEPTH_0_TO_LAYER[layer_num]
snake_case : Union[str, Any] = f"""up_blocks.{max_depth - 1}""" if int(lowercase ) > 3 else """down_blocks.0"""
if not string_left.startswith(""".""" ):
raise ValueError(f"""Naming error with {input_string} and string_left: {string_left}.""" )
snake_case : Any = string_left[1:]
if "resnets" in new_layer:
snake_case : Any = convert_resconv_naming(lowercase )
elif "attentions" in new_layer:
snake_case : int = convert_attn_naming(lowercase )
snake_case : Optional[Any] = new_string_left
if not isinstance(lowercase ,lowercase ):
snake_case : Optional[Any] = prefix + """.""" + new_layer + """.""" + string_left
else:
snake_case : Union[str, Any] = [prefix + """.""" + new_layer + """.""" + s for s in string_left]
return new_string
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Optional[Any]:
snake_case : Optional[Any] = {}
for k, v in state_dict.items():
if k.endswith("""kernel""" ):
            # up- and downsample layers don't have trainable weights
continue
snake_case : List[str] = rename(lowercase )
# check if we need to transform from Conv => Linear for attention
if isinstance(lowercase ,lowercase ):
snake_case : Any = transform_conv_attns(lowercase ,lowercase ,lowercase )
else:
snake_case : Optional[Any] = v
return new_state_dict
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> int:
if len(lowercase ) == 1:
if len(v.shape ) == 3:
# weight
snake_case : int = v[:, :, 0]
else:
# bias
snake_case : Any = v
else:
# qkv matrices
snake_case : str = v.shape[0]
snake_case : Tuple = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
snake_case : Union[str, Any] = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
snake_case : Tuple = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Optional[Any]:
snake_case : Any = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
snake_case : Dict = args.model_path.split("""/""" )[-1].split(""".""" )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), f"""Make sure to provide one of the official model names {MODELS_MAP.keys()}"""
snake_case : Union[str, Any] = download(lowercase )
snake_case : Tuple = MODELS_MAP[model_name]["""sample_rate"""]
snake_case : Tuple = MODELS_MAP[model_name]["""sample_size"""]
snake_case : Tuple = Object()
snake_case : int = sample_size
snake_case : List[str] = sample_rate
snake_case : int = 0
snake_case : List[Any] = UNetaDModel(sample_size=lowercase ,sample_rate=lowercase )
snake_case : Optional[Any] = diffusers_model.state_dict()
snake_case : Optional[Any] = DiffusionUncond(lowercase )
orig_model.load_state_dict(torch.load(args.model_path ,map_location=lowercase )["""state_dict"""] )
snake_case : Union[str, Any] = orig_model.diffusion_ema.eval()
snake_case : Optional[Any] = orig_model.state_dict()
snake_case : Union[str, Any] = rename_orig_weights(lowercase )
snake_case : List[str] = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
snake_case : Tuple = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(lowercase ) == 0, f"""Problem with {renamed_minus_diffusers}"""
assert all(k.endswith("""kernel""" ) for k in list(lowercase ) ), f"""Problem with {diffusers_minus_renamed}"""
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), f"""Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"""
if key == "time_proj.weight":
snake_case : Optional[int] = value.squeeze()
snake_case : int = value
diffusers_model.load_state_dict(lowercase )
snake_case : List[Any] = 100
snake_case : List[str] = 33
snake_case : Optional[Any] = IPNDMScheduler(num_train_timesteps=lowercase )
snake_case : Tuple = torch.manual_seed(lowercase )
snake_case : Optional[int] = torch.randn([1, 2, config.sample_size] ,generator=lowercase ).to(lowercase )
snake_case : Union[str, Any] = torch.linspace(1 ,0 ,steps + 1 ,device=lowercase )[:-1]
snake_case : Optional[Any] = get_crash_schedule(lowercase )
snake_case : Tuple = DanceDiffusionPipeline(unet=lowercase ,scheduler=lowercase )
snake_case : Dict = torch.manual_seed(33 )
snake_case : str = pipe(num_inference_steps=lowercase ,generator=lowercase ).audios
snake_case : Tuple = sampling.iplms_sample(lowercase ,lowercase ,lowercase ,{} )
snake_case : Any = generated.clamp(-1 ,1 )
snake_case : List[Any] = (generated - audio).abs().sum()
snake_case : Tuple = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print("""Diff sum""" ,lowercase )
print("""Diff max""" ,lowercase )
assert diff_max < 1E-3, f"""Diff max: {diff_max} is too much :-/"""
print(f"""Conversion for {model_name} successful!""" )
if __name__ == "__main__":
lowerCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
lowerCamelCase : List[str] = parser.parse_args()
main(args)
| 684 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowerCamelCase : List[str] = 3
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
print("""Generating primitive root of p""" )
while True:
snake_case : Optional[int] = random.randrange(3 ,lowercase )
if pow(lowercase ,2 ,lowercase ) == 1:
continue
if pow(lowercase ,lowercase ,lowercase ) == 1:
continue
return g
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
print("""Generating prime p...""" )
snake_case : Optional[int] = rabin_miller.generate_large_prime(lowercase ) # select large prime number.
snake_case : Optional[int] = primitive_root(lowercase ) # one primitive root on modulo p.
snake_case : Optional[Any] = random.randrange(3 ,lowercase ) # private_key -> have to be greater than 2 for safety.
snake_case : Tuple = cryptomath.find_mod_inverse(pow(lowercase ,lowercase ,lowercase ) ,lowercase )
snake_case : str = (key_size, e_a, e_a, p)
snake_case : Optional[Any] = (key_size, d)
return public_key, private_key
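# Encryption sketch (added for illustration; the original module only generates and
# stores keys). With public key (key_size, e_1, e_2, p) and private key (key_size, d)
# as constructed above, a message block m < p can be encrypted under a fresh random r:
#
#   c_1 = pow(e_1, r, p)
#   c_2 = (m * pow(e_2, r, p)) % p      # e_2 = inverse of e_1**d mod p
#   m   = (c_2 * pow(c_1, d, p)) % p    # decryption: the e_1**(d*r) factors cancel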
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> None:
if os.path.exists(f"""{name}_pubkey.txt""" ) or os.path.exists(f"""{name}_privkey.txt""" ):
print("""\nWARNING:""" )
print(
f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
"""Use a different name or delete these files and re-run this program.""" )
sys.exit()
snake_case , snake_case : Optional[Any] = generate_key(lowercase )
print(f"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(f"""{name}_pubkey.txt""" ,"""w""" ) as fo:
fo.write(f"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" )
print(f"""Writing private key to file {name}_privkey.txt...""" )
with open(f"""{name}_privkey.txt""" ,"""w""" ) as fo:
fo.write(f"""{private_key[0]},{private_key[1]}""" )
def SCREAMING_SNAKE_CASE__ ( ) -> None:
print("""Making key files...""" )
make_key_files("""elgamal""" ,2048 )
print("""Key files generation successful""" )
if __name__ == "__main__":
main()
| 684 | 1 |
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"""The RoBERTa Model transformer with early exiting (DeeRoBERTa). """ , UpperCamelCase__ , )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = RobertaConfig
_snake_case = """roberta"""
def __init__( self , A ) -> List[str]:
super().__init__(A )
snake_case : Dict = RobertaEmbeddings(A )
self.init_weights()
@add_start_docstrings(
"""RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. """ , UpperCamelCase__ , )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = RobertaConfig
_snake_case = """roberta"""
def __init__( self , A ) -> Optional[Any]:
super().__init__(A )
snake_case : Optional[Any] = config.num_labels
snake_case : Any = config.num_hidden_layers
snake_case : Tuple = DeeRobertaModel(A )
snake_case : Any = nn.Dropout(config.hidden_dropout_prob )
snake_case : str = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(A )
def UpperCAmelCase ( self , A=None , A=None , A=None , A=None , A=None , A=None , A=None , A=-1 , A=False , ) -> Optional[int]:
snake_case : Optional[Any] = self.num_layers
try:
snake_case : Optional[Any] = self.roberta(
A , attention_mask=A , token_type_ids=A , position_ids=A , head_mask=A , inputs_embeds=A , )
snake_case : List[str] = outputs[1]
snake_case : str = self.dropout(A )
snake_case : Union[str, Any] = self.classifier(A )
snake_case : List[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
snake_case : Optional[Any] = e.message
snake_case : int = e.exit_layer
snake_case : Union[str, Any] = outputs[0]
if not self.training:
snake_case : List[str] = entropy(A )
snake_case : Any = []
snake_case : List[Any] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
snake_case : str = MSELoss()
snake_case : Dict = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case : Optional[int] = CrossEntropyLoss()
snake_case : Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
snake_case : List[Any] = []
for highway_exit in outputs[-1]:
snake_case : List[str] = highway_exit[0]
if not self.training:
highway_logits_all.append(A )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
snake_case : Dict = MSELoss()
snake_case : str = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case : Any = CrossEntropyLoss()
snake_case : List[Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(A )
if train_highway:
snake_case : List[Any] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
snake_case : Union[str, Any] = (loss,) + outputs
if not self.training:
snake_case : Dict = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
snake_case : List[str] = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 684 |
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> int:
if exponent == 1:
return base
if exponent % 2 == 0:
snake_case : Dict = _modexpt(lowercase ,exponent // 2 ,lowercase ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(lowercase ,exponent - 1 ,lowercase )) % modulo_value
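# Sanity note (added for illustration): for exponent >= 1 this square-and-multiply
# routine agrees with Python's built-in three-argument pow, e.g.
#   _modexpt(1777, 1855, 10**8) == pow(1777, 1855, 10**8)
# The solution below uses it to compute the tetration 1777**1777**...**1777
# (1855 levels) modulo 10**digits.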
def SCREAMING_SNAKE_CASE__ ( lowercase = 1777 ,lowercase = 1855 ,lowercase = 8 ) -> int:
snake_case : int = base
for _ in range(1 ,lowercase ):
snake_case : List[str] = _modexpt(lowercase ,lowercase ,10**digits )
return result
if __name__ == "__main__":
print(f"""{solution() = }""")
| 684 | 1 |
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> Union[str, Any]:
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def SCREAMING_SNAKE_CASE__ ( lowercase ,column=0 ) -> Optional[Any]:
    return sorted(lowercase ,key=lambda x : x[column] )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase=float("""inf""" ) ) -> Any:
for i in range(points_counts - 1 ):
        for j in range(i + 1 ,points_counts ):
snake_case : Any = euclidean_distance_sqr(points[i] ,points[j] )
if current_dis < min_dis:
snake_case : int = current_dis
return min_dis
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase=float("""inf""" ) ) -> Dict:
    for i in range(min(6 ,points_counts - 1 ) ,points_counts ):
        for j in range(max(0 ,i - 6 ) ,i ):
snake_case : int = euclidean_distance_sqr(points[i] ,points[j] )
if current_dis < min_dis:
snake_case : int = current_dis
return min_dis
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> List[str]:
# base case
if points_counts <= 3:
return dis_between_closest_pair(lowercase ,lowercase )
# recursion
snake_case : str = points_counts // 2
snake_case : List[Any] = closest_pair_of_points_sqr(
lowercase ,points_sorted_on_y[:mid] ,lowercase )
snake_case : str = closest_pair_of_points_sqr(
lowercase ,points_sorted_on_y[mid:] ,points_counts - mid )
snake_case : Any = min(lowercase ,lowercase )
snake_case : Tuple = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
            cross_strip.append(point )
snake_case : Optional[int] = dis_between_closest_in_strip(
lowercase ,len(lowercase ) ,lowercase )
return min(lowercase ,lowercase )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> int:
snake_case : Optional[int] = column_based_sort(lowercase ,column=0 )
snake_case : Union[str, Any] = column_based_sort(lowercase ,column=1 )
return (
closest_pair_of_points_sqr(
lowercase ,lowercase ,lowercase )
) ** 0.5
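# Worked example (added for illustration): for the sample points below the closest
# pair is (2, 3) and (3, 4), so closest_pair_of_points returns sqrt(2) ~= 1.4142.
# The strip step keeps the divide-and-conquer at O(n log n), versus the O(n^2)
# brute force in dis_between_closest_pair.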
if __name__ == "__main__":
lowerCamelCase : Tuple = [(2, 3), (1_2, 3_0), (4_0, 5_0), (5, 1), (1_2, 1_0), (3, 4)]
print('Distance:', closest_pair_of_points(points, len(points)))
| 684 |
from itertools import product
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> list[int]:
snake_case : Tuple = sides_number
snake_case : List[str] = max_face_number * dice_number
snake_case : Any = [0] * (max_total + 1)
snake_case : int = 1
snake_case : List[str] = range(lowercase ,max_face_number + 1 )
for dice_numbers in product(lowercase ,repeat=lowercase ):
snake_case : Any = sum(lowercase )
totals_frequencies[total] += 1
return totals_frequencies
def SCREAMING_SNAKE_CASE__ ( ) -> float:
snake_case : List[str] = total_frequency_distribution(
sides_number=4 ,dice_number=9 )
snake_case : str = total_frequency_distribution(
sides_number=6 ,dice_number=6 )
snake_case : Optional[int] = 0
snake_case : List[str] = 9
snake_case : Union[str, Any] = 4 * 9
snake_case : Dict = 6
for peter_total in range(lowercase ,max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
snake_case : str = (4**9) * (6**6)
snake_case : int = peter_wins_count / total_games_number
snake_case : Optional[int] = round(lowercase ,ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(f"""{solution() = }""")
| 684 | 1 |
from pathlib import Path
import fire
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> Any:
snake_case : Dict = Path(lowercase )
snake_case : Any = Path(lowercase )
dest_dir.mkdir(exist_ok=lowercase )
for path in src_dir.iterdir():
snake_case : Any = [x.rstrip() for x in list(path.open().readlines() )][:n]
snake_case : Dict = dest_dir.joinpath(path.name )
print(lowercase )
dest_path.open("""w""" ).write("""\n""".join(lowercase ) )
if __name__ == "__main__":
fire.Fire(minify)
| 684 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 684 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : List[Any] = logging.get_logger(__name__)
lowerCamelCase : int = {
'studio-ousia/luke-base': 'https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json',
'studio-ousia/luke-large': 'https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json',
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """luke"""
def __init__( self , A=5_0_2_6_7 , A=5_0_0_0_0_0 , A=7_6_8 , A=2_5_6 , A=1_2 , A=1_2 , A=3_0_7_2 , A="gelu" , A=0.1 , A=0.1 , A=5_1_2 , A=2 , A=0.02 , A=1e-1_2 , A=True , A=None , A=1 , A=0 , A=2 , **A , ) -> Optional[int]:
super().__init__(pad_token_id=A , bos_token_id=A , eos_token_id=A , **A )
snake_case : Optional[int] = vocab_size
snake_case : Optional[Any] = entity_vocab_size
snake_case : Dict = hidden_size
snake_case : List[str] = entity_emb_size
snake_case : Union[str, Any] = num_hidden_layers
snake_case : str = num_attention_heads
snake_case : Optional[int] = hidden_act
snake_case : Tuple = intermediate_size
snake_case : List[Any] = hidden_dropout_prob
snake_case : Optional[int] = attention_probs_dropout_prob
snake_case : Union[str, Any] = max_position_embeddings
snake_case : Dict = type_vocab_size
snake_case : List[Any] = initializer_range
snake_case : Optional[Any] = layer_norm_eps
snake_case : List[str] = use_entity_aware_attention
snake_case : Union[str, Any] = classifier_dropout
| 684 |
import os
def SCREAMING_SNAKE_CASE__ ( ) -> Dict:
    with open(os.path.dirname(__file__ ) + """/grid.txt""" ) as f:
snake_case : Tuple = [] # noqa: E741
for _ in range(20 ):
l.append([int(lowercase ) for x in f.readline().split()] )
snake_case : Optional[Any] = 0
# right
for i in range(20 ):
for j in range(17 ):
snake_case : List[Any] = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
snake_case : Tuple = temp
# down
for i in range(17 ):
for j in range(20 ):
snake_case : Any = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
snake_case : str = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
snake_case : int = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
snake_case : int = temp
# diagonal 2
for i in range(17 ):
for j in range(3 ,20 ):
snake_case : Any = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
snake_case : Any = temp
return maximum
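# Note (added for illustration): the four scans above cover every direction a
# straight run of four can take in the grid: horizontal, vertical, and both
# diagonals. Each direction only needs to be scanned one way, because the product
# of four factors does not depend on orientation.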
if __name__ == "__main__":
print(solution())
| 684 | 1 |
from collections.abc import Sequence
def SCREAMING_SNAKE_CASE__ ( lowercase = None ) -> int:
if nums is None or not nums:
raise ValueError("""Input sequence should not be empty""" )
snake_case : int = nums[0]
for i in range(1 ,len(lowercase ) ):
snake_case : int = nums[i]
snake_case : str = max(lowercase ,ans + num ,lowercase )
return ans
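# Worked examples (added for illustration). Because the recurrence may keep the
# best so far, extend it, or restart with the current element, this computes the
# best non-empty *subsequence* (not necessarily contiguous) sum:
#   max_subsequence_sum([1, -2, 3, 4]) == 8    # pick the positive elements 1, 3, 4
#   max_subsequence_sum([-3, -1, -2]) == -1    # all negative: best single element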
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
lowerCamelCase : str = int(input('Enter number of elements : ').strip())
lowerCamelCase : Any = list(map(int, input('\nEnter the numbers : ').strip().split()))[:n]
print(max_subsequence_sum(array))
| 684 |
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> list:
for i in range(len(lowercase ) - 1 ,0 ,-1 ):
snake_case : Any = False
        for j in range(i ,0 ,-1 ):
if unsorted[j] < unsorted[j - 1]:
snake_case , snake_case : Optional[Any] = unsorted[j - 1], unsorted[j]
snake_case : Dict = True
        for j in range(i ):
if unsorted[j] > unsorted[j + 1]:
snake_case , snake_case : Dict = unsorted[j + 1], unsorted[j]
snake_case : Tuple = True
if not swapped:
break
return unsorted
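# Worked example (added for illustration): each outer iteration first bubbles the
# current minimum toward the front (right-to-left pass), then the maximum toward
# the back (left-to-right pass), and `swapped` allows an early exit on
# already-sorted input:
#   cocktail_shaker_sort([4, 5, 2, 1, 2]) -> [1, 2, 2, 4, 5]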
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase : Any = input('Enter numbers separated by a comma:\n').strip()
lowerCamelCase : Optional[int] = [int(item) for item in user_input.split(',')]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 684 | 1 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
lowerCamelCase : int = logging.get_logger(__name__)
@add_end_docstrings(
UpperCamelCase__ , R"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" , )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
def UpperCAmelCase ( self , A ) -> np.ndarray:
if self.framework == "tf":
snake_case : Union[str, Any] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
snake_case : int = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=A )
else:
raise ValueError("""Unsupported framework""" )
return masked_index
def UpperCAmelCase ( self , A ) -> np.ndarray:
snake_case : List[Any] = self.get_masked_index(A )
snake_case : Dict = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
"""fill-mask""" , self.model.base_model_prefix , f"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
def UpperCAmelCase ( self , A ) -> Dict:
if isinstance(A , A ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["""input_ids"""][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(A )
def UpperCAmelCase ( self , A , A=None , **A ) -> Dict[str, GenericTensor]:
if return_tensors is None:
snake_case : Optional[int] = self.framework
snake_case : Dict = self.tokenizer(A , return_tensors=A )
self.ensure_exactly_one_mask_token(A )
return model_inputs
def UpperCAmelCase ( self , A ) -> int:
snake_case : Dict = self.model(**A )
snake_case : Tuple = model_inputs["""input_ids"""]
return model_outputs
def UpperCAmelCase ( self , A , A=5 , A=None ) -> List[Any]:
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
snake_case : Any = target_ids.shape[0]
snake_case : Dict = model_outputs["""input_ids"""][0]
snake_case : Optional[int] = model_outputs["""logits"""]
if self.framework == "tf":
snake_case : int = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
snake_case : Dict = outputs.numpy()
snake_case : Optional[Any] = outputs[0, masked_index, :]
snake_case : Optional[Any] = stable_softmax(A , axis=-1 )
if target_ids is not None:
snake_case : str = tf.gather_nd(tf.squeeze(A , 0 ) , target_ids.reshape(-1 , 1 ) )
snake_case : Any = tf.expand_dims(A , 0 )
snake_case : Tuple = tf.math.top_k(A , k=A )
snake_case , snake_case : int = topk.values.numpy(), topk.indices.numpy()
else:
snake_case : List[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=A ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
snake_case : int = outputs[0, masked_index, :]
snake_case : Dict = logits.softmax(dim=-1 )
if target_ids is not None:
snake_case : Optional[Any] = probs[..., target_ids]
snake_case , snake_case : Optional[Any] = probs.topk(A )
snake_case : List[Any] = []
snake_case : Optional[Any] = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
snake_case : str = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
snake_case : List[str] = input_ids.numpy().copy()
if target_ids is not None:
snake_case : Any = target_ids[p].tolist()
snake_case : List[Any] = p
# Filter padding out:
snake_case : List[str] = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
snake_case : Tuple = self.tokenizer.decode(A , skip_special_tokens=A )
snake_case : List[str] = {"""score""": v, """token""": p, """token_str""": self.tokenizer.decode([p] ), """sequence""": sequence}
row.append(A )
result.append(A )
if single_mask:
return result[0]
return result
def UpperCAmelCase ( self , A , A=None ) -> List[str]:
if isinstance(A , A ):
snake_case : Optional[Any] = [targets]
try:
snake_case : Union[str, Any] = self.tokenizer.get_vocab()
except Exception:
snake_case : Any = {}
snake_case : str = []
for target in targets:
snake_case : Union[str, Any] = vocab.get(A , A )
if id_ is None:
snake_case : int = self.tokenizer(
A , add_special_tokens=A , return_attention_mask=A , return_token_type_ids=A , max_length=1 , truncation=A , )["""input_ids"""]
if len(A ) == 0:
logger.warning(
f"""The specified target token `{target}` does not exist in the model vocabulary. """
"""We cannot replace it with anything meaningful, ignoring it""" )
continue
snake_case : List[str] = input_ids[0]
                # XXX: if users hit this path, tokenization becomes pretty slow;
                # the warning below lets them fix the input and get faster
                # performance.
logger.warning(
f"""The specified target token `{target}` does not exist in the model vocabulary. """
f"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
target_ids.append(id_ )
snake_case : Union[str, Any] = list(set(A ) )
if len(A ) == 0:
raise ValueError("""At least one target must be provided when passed.""" )
snake_case : str = np.array(A )
return target_ids
def UpperCAmelCase ( self , A=None , A=None ) -> str:
snake_case : Tuple = {}
if targets is not None:
snake_case : List[Any] = self.get_target_ids(A , A )
snake_case : Optional[Any] = target_ids
if top_k is not None:
snake_case : List[str] = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"""fill-mask""" , self.model.base_model_prefix , """The tokenizer does not define a `mask_token`.""" )
return {}, {}, postprocess_params
    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs
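

# Added usage sketch (not part of the original sample): exercising the fill-mask
# task through the public pipeline API. The checkpoint name below is an
# illustrative assumption; any fill-mask model works the same way.
if __name__ == "__main__":
    from transformers import pipeline

    fill = pipeline("fill-mask", model="distilroberta-base")
    print(fill("Paris is the <mask> of France.", top_k=2))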
| 684 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    'artists_file': 'artists.json',
    'lyrics_file': 'lyrics.json',
    'genres_file': 'genres.json',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'artists_file': {
        'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
    },
    'genres_file': {
        'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
    },
    'lyrics_file': {
        'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
    },
}

PRETRAINED_LYRIC_TOKENS_SIZES = {
    'jukebox': 512,
}
class JukeboxTokenizer(PreTrainedTokenizer):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_LYRIC_TOKENS_SIZES
_snake_case = ["""input_ids""", """attention_mask"""]
def __init__( self , A , A , A , A=["v3", "v2", "v2"] , A=5_1_2 , A=5 , A="<|endoftext|>" , **A , ) -> Optional[Any]:
snake_case : Dict = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else unk_token
super().__init__(
unk_token=A , n_genres=A , version=A , max_n_lyric_tokens=A , **A , )
snake_case : Optional[Any] = version
snake_case : Optional[Any] = max_n_lyric_tokens
snake_case : Tuple = n_genres
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : Union[str, Any] = json.load(A )
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : str = json.load(A )
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : List[str] = json.load(A )
snake_case : Tuple = r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"""
# In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
if len(self.lyrics_encoder ) == 7_9:
snake_case : Optional[Any] = oov.replace(r"""\-'""" , r"""\-+'""" )
snake_case : Optional[Any] = regex.compile(A )
snake_case : Optional[Any] = {v: k for k, v in self.artists_encoder.items()}
snake_case : int = {v: k for k, v in self.genres_encoder.items()}
snake_case : List[Any] = {v: k for k, v in self.lyrics_encoder.items()}
@property
def UpperCAmelCase ( self ) -> Optional[Any]:
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def UpperCAmelCase ( self ) -> str:
return {**self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder}
def UpperCAmelCase ( self , A , A , A ) -> Optional[Any]:
snake_case : Optional[int] = [self.artists_encoder.get(A , 0 ) for artist in list_artists]
for genres in range(len(A ) ):
snake_case : Optional[int] = [self.genres_encoder.get(A , 0 ) for genre in list_genres[genres]]
snake_case : Union[str, Any] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
snake_case : Optional[Any] = [[self.lyrics_encoder.get(A , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def UpperCAmelCase ( self , A ) -> List[str]:
return list(A )
def UpperCAmelCase ( self , A , A , A , **A ) -> List[str]:
snake_case , snake_case , snake_case : Any = self.prepare_for_tokenization(A , A , A )
snake_case : Tuple = self._tokenize(A )
return artist, genre, lyrics
def UpperCAmelCase ( self , A , A , A , A = False ) -> Tuple[str, str, str, Dict[str, Any]]:
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
snake_case : Tuple = artists[idx].lower()
snake_case : List[Any] = [genres[idx].lower()]
else:
snake_case : Union[str, Any] = self._normalize(artists[idx] ) + """.v2"""
snake_case : Any = [
self._normalize(A ) + """.v2""" for genre in genres[idx].split("""_""" )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
snake_case : str = regex.compile(r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""" )
snake_case : Dict = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"""
snake_case : Union[str, Any] = {vocab[index]: index + 1 for index in range(len(A ) )}
snake_case : Optional[int] = 0
snake_case : Union[str, Any] = len(A ) + 1
snake_case : Optional[int] = self.vocab
snake_case : str = {v: k for k, v in self.vocab.items()}
snake_case : int = """"""
else:
snake_case : Optional[int] = regex.compile(r"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""" )
snake_case : int = self._run_strip_accents(A )
snake_case : Any = lyrics.replace("""\\""" , """\n""" )
snake_case : Tuple = self.out_of_vocab.sub("""""" , A ), [], []
return artists, genres, lyrics
def UpperCAmelCase ( self , A ) -> List[Any]:
snake_case : int = unicodedata.normalize("""NFD""" , A )
snake_case : int = []
for char in text:
snake_case : Optional[Any] = unicodedata.category(A )
if cat == "Mn":
continue
output.append(A )
return "".join(A )
def UpperCAmelCase ( self , A ) -> str:
snake_case : Dict = (
[chr(A ) for i in range(ord("""a""" ) , ord("""z""" ) + 1 )]
+ [chr(A ) for i in range(ord("""A""" ) , ord("""Z""" ) + 1 )]
+ [chr(A ) for i in range(ord("""0""" ) , ord("""9""" ) + 1 )]
+ ["""."""]
)
snake_case : Dict = frozenset(A )
snake_case : Dict = re.compile(r"""_+""" )
snake_case : str = """""".join([c if c in accepted else """_""" for c in text.lower()] )
snake_case : List[Any] = pattern.sub("""_""" , A ).strip("""_""" )
return text
def UpperCAmelCase ( self , A ) -> str:
return " ".join(A )
def UpperCAmelCase ( self , A , A = None , A = False ) -> List[Any]:
# Convert to TensorType
if not isinstance(A , A ):
snake_case : Tuple = TensorType(A )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
"""Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""" )
import tensorflow as tf
snake_case : Union[str, Any] = tf.constant
snake_case : int = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""" )
import torch
snake_case : List[str] = torch.tensor
snake_case : Optional[Any] = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""" )
import jax.numpy as jnp # noqa: F811
snake_case : Optional[int] = jnp.array
snake_case : Dict = _is_jax
else:
snake_case : List[str] = np.asarray
snake_case : Tuple = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
snake_case : Any = [inputs]
if not is_tensor(A ):
snake_case : List[Any] = as_tensor(A )
except: # noqa E722
raise ValueError(
"""Unable to create tensor, you should probably activate truncation and/or padding """
"""with 'padding=True' 'truncation=True' to have batched tensors with the same length.""" )
return inputs
def __call__( self , A , A , A="" , A="pt" ) -> BatchEncoding:
snake_case : List[str] = [0, 0, 0]
snake_case : List[str] = [artist] * len(self.version )
snake_case : List[Any] = [genres] * len(self.version )
snake_case , snake_case , snake_case : Optional[int] = self.tokenize(A , A , A )
snake_case , snake_case , snake_case : int = self._convert_token_to_id(A , A , A )
snake_case : Any = [-INFINITY] * len(full_tokens[-1] )
snake_case : int = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=A )
for i in range(len(self.version ) )
]
return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks} )
def UpperCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Any = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=A ) )
snake_case : Any = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=A ) )
snake_case : Tuple = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=A ) )
return (artists_file, genres_file, lyrics_file)
def UpperCAmelCase ( self , A , A , A ) -> List[Any]:
snake_case : Optional[int] = self.artists_decoder.get(A )
snake_case : Optional[Any] = [self.genres_decoder.get(A ) for genre in genres_index]
snake_case : Optional[int] = [self.lyrics_decoder.get(A ) for character in lyric_index]
return artist, genres, lyrics
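

# Added illustration (standalone sketch, not a tokenizer method) of the
# normalization rule used above: characters outside [a-z0-9.] collapse to
# single underscores, and edge underscores are stripped.
if __name__ == "__main__":
    accepted = frozenset("abcdefghijklmnopqrstuvwxyz0123456789.")
    text = "".join(c if c in accepted else "_" for c in "The Beatles / Abbey Road!".lower())
    print(re.compile(r"_+").sub("_", text).strip("_"))  # -> the_beatles_abbey_road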
| 684 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_mbart': ['MBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MBartConfig', 'MBartOnnxConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_mbart'] = ['MBartTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_mbart_fast'] = ['MBartTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_mbart'] = [
'MBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'MBartForCausalLM',
'MBartForConditionalGeneration',
'MBartForQuestionAnswering',
'MBartForSequenceClassification',
'MBartModel',
'MBartPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_mbart'] = [
'TFMBartForConditionalGeneration',
'TFMBartModel',
'TFMBartPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_flax_mbart'] = [
'FlaxMBartForConditionalGeneration',
'FlaxMBartForQuestionAnswering',
'FlaxMBartForSequenceClassification',
'FlaxMBartModel',
'FlaxMBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 684 |
def naive_pattern_search(s: str, pattern: str) -> list:
    """Slide the pattern over `s` and record every index where all characters match."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
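

# Added cross-check: the naive scan above runs in O(len(s) * len(pattern));
# str.find recovers the same (overlap-preserving) positions.
def find_all_with_str_find(s: str, pattern: str) -> list:
    positions, start = [], 0
    while (idx := s.find(pattern, start)) != -1:
        positions.append(idx)
        start = idx + 1  # advance one char so overlapping matches are kept
    return positions


if __name__ == "__main__":
    sample = 'ABAAABCDBBABCDDEBCABC'
    assert find_all_with_str_find(sample, 'ABC') == naive_pattern_search(sample, 'ABC')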
| 684 | 1 |
from __future__ import annotations
def peak(lst: list[int]) -> int:
    """Find a peak of a unimodal (rising then falling) list by bisection in O(log n)."""
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])
if __name__ == "__main__":
import doctest
doctest.testmod()
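

# Added examples: the input must be unimodal (strictly rising then falling)
# for the bisection above to be valid.
if __name__ == "__main__":
    assert peak([1, 4, 7, 9, 6, 2]) == 9
    assert peak([2, 8, 5, 1]) == 8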
| 684 |
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Element-wise tanh written via its exponential form: tanh(x) = 2 / (1 + e^(-2x)) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
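

# Added numerical check: the exponential form above agrees with numpy's
# built-in tanh to floating-point precision.
if __name__ == "__main__":
    xs = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
    assert np.allclose(tangent_hyperbolic(xs), np.tanh(xs))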
| 684 | 1 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 684 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_vit_mae'] = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_vit_mae'] = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 684 | 1 |
import numpy as np
def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """ELU activation: identity for positive inputs, alpha * (e^x - 1) for the rest."""
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))
if __name__ == "__main__":
import doctest
doctest.testmod()
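

# Added demo: negatives saturate toward -alpha, positives pass through unchanged.
if __name__ == "__main__":
    print(exponential_linear_unit(np.array([-10.0, -1.0, 0.0, 3.0]), alpha=1.0))
    # approximately [-0.99995 -0.63212  0.       3.     ]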
| 684 |
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 684 | 1 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
"""simple docstring"""
_snake_case = OpenAIGPTTokenizer
_snake_case = OpenAIGPTTokenizerFast
_snake_case = True
_snake_case = False
def UpperCAmelCase ( self ) -> List[str]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
snake_case : Union[str, Any] = dict(zip(A , range(len(A ) ) ) )
snake_case : Tuple = ["""#version: 0.2""", """l o""", """lo w""", """e r</w>""", """"""]
snake_case : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(A ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(A ) )
def UpperCAmelCase ( self , A ) -> List[str]:
return "lower newer", "lower newer"
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Optional[int] = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
snake_case : int = """lower"""
snake_case : List[str] = ["""low""", """er</w>"""]
snake_case : List[str] = tokenizer.tokenize(A )
self.assertListEqual(A , A )
snake_case : Optional[Any] = tokens + ["""<unk>"""]
snake_case : List[str] = [1_4, 1_5, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A )
def UpperCAmelCase ( self , A=1_5 ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case : str = self.rust_tokenizer_class.from_pretrained(A , **A )
# Simple input
snake_case : Dict = """This is a simple input"""
snake_case : str = ["""This is a simple input 1""", """This is a simple input 2"""]
snake_case : Any = ("""This is a simple input""", """This is a pair""")
snake_case : Optional[int] = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(A , tokenizer_r.encode , A , max_length=A , padding="""max_length""" )
# Simple input
self.assertRaises(A , tokenizer_r.encode_plus , A , max_length=A , padding="""max_length""" )
# Simple input
self.assertRaises(
A , tokenizer_r.batch_encode_plus , A , max_length=A , padding="""max_length""" , )
# Pair input
self.assertRaises(A , tokenizer_r.encode , A , max_length=A , padding="""max_length""" )
# Pair input
self.assertRaises(A , tokenizer_r.encode_plus , A , max_length=A , padding="""max_length""" )
# Pair input
self.assertRaises(
A , tokenizer_r.batch_encode_plus , A , max_length=A , padding="""max_length""" , )
def UpperCAmelCase ( self ) -> Optional[int]:
pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
"""simple docstring"""
pass
| 684 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spm_char.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model',
'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model',
'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'microsoft/speecht5_asr': 1_0_2_4,
'microsoft/speecht5_tts': 1_0_2_4,
'microsoft/speecht5_vc': 1_0_2_4,
}
class SpeechT5Tokenizer(PreTrainedTokenizer):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = ["""input_ids""", """attention_mask"""]
def __init__( self , A , A="<s>" , A="</s>" , A="<unk>" , A="<pad>" , A = None , **A , ) -> None:
snake_case : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A , eos_token=A , unk_token=A , pad_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
snake_case : Tuple = vocab_file
snake_case : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A )
@property
def UpperCAmelCase ( self ) -> List[Any]:
return self.sp_model.get_piece_size()
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Any = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> List[str]:
snake_case : Optional[Any] = self.__dict__.copy()
snake_case : Optional[Any] = None
return state
def __setstate__( self , A ) -> Tuple:
snake_case : Any = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
snake_case : List[Any] = {}
snake_case : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase ( self , A ) -> List[str]:
return self.sp_model.encode(A , out_type=A )
def UpperCAmelCase ( self , A ) -> Tuple:
return self.sp_model.piece_to_id(A )
def UpperCAmelCase ( self , A ) -> int:
snake_case : Union[str, Any] = self.sp_model.IdToPiece(A )
return token
def UpperCAmelCase ( self , A ) -> Tuple:
snake_case : Optional[int] = []
snake_case : str = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A ) + token
snake_case : Dict = []
else:
current_sub_tokens.append(A )
out_string += self.sp_model.decode(A )
return out_string.strip()
def UpperCAmelCase ( self , A , A=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCAmelCase ( self , A , A = None , A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
snake_case : Any = [1]
if token_ids_a is None:
return ([0] * len(A )) + suffix_ones
return ([0] * len(A )) + ([0] * len(A )) + suffix_ones
def UpperCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Optional[Any] = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , """wb""" ) as fi:
snake_case : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
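

# Added sketch of the special-token layout implemented above: a single sequence
# receives one trailing eos token, and the special-tokens mask marks only that
# suffix. The ids below are hypothetical placeholders.
if __name__ == "__main__":
    token_ids = [4, 8, 15]  # hypothetical piece ids
    eos_token_id = 2        # assumed id for </s>
    with_special = token_ids + [eos_token_id]
    special_tokens_mask = [0] * len(token_ids) + [1]
    assert len(with_special) == len(special_tokens_mask)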
| 684 | 1 |
def sum_digits(num: int) -> int:
    """Return the sum of the decimal digits of `num`."""
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    """Digit sum of the numerator of the `max_n`-th convergent of e = [2; 1, 2, 1, 1, 4, ...].

    Numerators follow n_k = a_k * n_(k-1) + n_(k-2), where a_k = 2k/3 when 3 divides k, else 1.
    """
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
if __name__ == "__main__":
print(f"""{solution() = }""")
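

# Added cross-check sketch: the same convergents can be built top-down with
# fractions.Fraction from the continued-fraction terms of e.
if __name__ == "__main__":
    from fractions import Fraction

    terms = [2] + [2 * (k + 1) // 3 if k % 3 == 2 else 1 for k in range(1, 10)]
    approx = Fraction(terms[-1])
    for a in reversed(terms[:-1]):
        approx = a + 1 / approx
    print(approx, float(approx))  # ~ 2.71828, approaching e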
| 684 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}
class GPTNeoXJapaneseConfig(PretrainedConfig):
"""simple docstring"""
_snake_case = """gpt_neox_japanese"""
def __init__( self , A=3_2_0_0_0 , A=2_5_6_0 , A=3_2 , A=3_2 , A=4 , A="gelu" , A=1.00 , A=1_0_0_0_0 , A=2_0_4_8 , A=0.02 , A=1e-5 , A=True , A=3_1_9_9_6 , A=3_1_9_9_9 , A=0.1 , A=0.0 , **A , ) -> str:
super().__init__(bos_token_id=A , eos_token_id=A , **A )
snake_case : Optional[Any] = vocab_size
snake_case : Optional[Any] = max_position_embeddings
snake_case : Union[str, Any] = hidden_size
snake_case : Union[str, Any] = num_hidden_layers
snake_case : Optional[int] = num_attention_heads
snake_case : Optional[int] = intermediate_multiple_size
snake_case : int = hidden_act
snake_case : str = rotary_pct
snake_case : Optional[Any] = rotary_emb_base
snake_case : Any = initializer_range
snake_case : Any = layer_norm_eps
snake_case : Optional[Any] = use_cache
snake_case : Tuple = attention_dropout
snake_case : Tuple = hidden_dropout
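

# Added arithmetic sketch grounded in the defaults above (hidden_size=2560,
# 32 attention heads, rotary_pct=1.0): each head is 80-dim and rotary position
# embeddings cover all 80 dimensions.
if __name__ == "__main__":
    hidden_size, num_attention_heads, rotary_pct = 2560, 32, 1.0
    head_size = hidden_size // num_attention_heads
    rotary_ndims = int(head_size * rotary_pct)
    print(head_size, rotary_ndims)  # 80 80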
| 684 | 1 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
_run_remote_tests = parse_flag_from_env('RUN_REMOTE', default=False)
_run_local_tests = parse_flag_from_env('RUN_LOCAL', default=True)
_run_packaged_tests = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
require_sndfile = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ',
)
# Beam
require_beam = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('0.3.2'),
reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
require_not_windows = pytest.mark.skipif(
sys.platform == 'win32',
reason='test should not be run on Windows',
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass
class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env,
    )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")
    return result
def pytest_xdist_worker_id():
    """Return the numerical id of the current `pytest-xdist` worker, or 0 when xdist isn't used."""
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    """Derive a unique port per xdist worker so concurrent torch.distributed tests don't collide."""
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
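

# Added usage sketch for the offline simulator above: any HTTP request made
# inside the context fails fast instead of hanging the test suite.
if __name__ == "__main__":
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        try:
            requests.get("https://huggingface.co", timeout=5)
        except requests.ConnectionError as err:
            print("offline mode intercepted the request:", err)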
| 684 |
def hex_to_bin(hex_num: str) -> int:
    """Convert a (possibly signed) hex string to an int whose decimal digits spell its binary form."""
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")
    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")
    if int_num == 0:
        return 0  # guard added: the digit loop below would otherwise produce int("")
    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1
    return int(("-" + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
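

# Added examples: 0xFA == 0b11111010, and the sign carries through.
if __name__ == "__main__":
    assert hex_to_bin("FA") == 11111010
    assert hex_to_bin("-0xfa") == -11111010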
| 684 | 1 |
from copy import deepcopy
class FenwickTree:
    """Fenwick tree (binary indexed tree): point updates and prefix/range sums in O(log n)."""

    def __init__(self, arr=None, size=None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
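

# Added usage sketch: build from an array, then range-sum and point-update.
if __name__ == "__main__":
    ft = FenwickTree(arr=[1, 2, 3, 4, 5])
    assert ft.query(0, 5) == 15  # sum of the whole array
    ft.update(2, 10)             # set index 2 to 10 (was 3)
    assert ft.query(2, 3) == 10
    assert ft.query(0, 5) == 22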
| 684 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class __lowercase (BaseImageProcessor ):
"""simple docstring"""
_snake_case = ["""pixel_values"""]
def __init__( self , A = True , A = None , A = PIL.Image.BICUBIC , A = True , A = None , A = 1 / 2_5_5 , A = True , A = True , A = None , A = None , **A , ) -> None:
super().__init__(**A )
snake_case : int = size if size is not None else {"""height""": 2_5_6, """width""": 2_5_6}
snake_case : int = get_size_dict(A )
snake_case : Optional[Any] = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
snake_case : Dict = get_size_dict(A , param_name="""crop_size""" )
snake_case : int = do_resize
snake_case : str = size
snake_case : Tuple = resample
snake_case : Any = do_center_crop
snake_case : Tuple = crop_size
snake_case : int = do_rescale
snake_case : Dict = rescale_factor
snake_case : Union[str, Any] = do_normalize
snake_case : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
snake_case : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCAmelCase ( self , A , A , A = PIL.Image.BICUBIC , A = None , **A , ) -> np.ndarray:
snake_case : Dict = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return resize(
A , size=(size["""height"""], size["""width"""]) , resample=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A = None , **A , ) -> np.ndarray:
snake_case : Any = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(A , size=(size["""height"""], size["""width"""]) , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A = None , **A , ) -> Tuple:
return rescale(A , scale=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A , A = None , **A , ) -> np.ndarray:
return normalize(A , mean=A , std=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A = None , A = None , A=None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = ChannelDimension.FIRST , **A , ) -> PIL.Image.Image:
snake_case : str = do_resize if do_resize is not None else self.do_resize
snake_case : Dict = resample if resample is not None else self.resample
snake_case : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case : Tuple = do_rescale if do_rescale is not None else self.do_rescale
snake_case : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case : List[str] = do_normalize if do_normalize is not None else self.do_normalize
snake_case : int = image_mean if image_mean is not None else self.image_mean
snake_case : List[str] = image_std if image_std is not None else self.image_std
snake_case : Dict = size if size is not None else self.size
snake_case : Tuple = get_size_dict(A )
snake_case : Dict = crop_size if crop_size is not None else self.crop_size
snake_case : List[str] = get_size_dict(A , param_name="""crop_size""" )
snake_case : int = make_list_of_images(A )
if not valid_images(A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
snake_case : Optional[Any] = [to_numpy_array(A ) for image in images]
if do_resize:
snake_case : Dict = [self.resize(image=A , size=A , resample=A ) for image in images]
if do_center_crop:
snake_case : List[str] = [self.center_crop(image=A , size=A ) for image in images]
if do_rescale:
snake_case : List[str] = [self.rescale(image=A , scale=A ) for image in images]
if do_normalize:
snake_case : str = [self.normalize(image=A , mean=A , std=A ) for image in images]
snake_case : Union[str, Any] = [to_channel_dimension_format(A , A ) for image in images]
snake_case : List[Any] = {"""pixel_values""": images}
return BatchFeature(data=A , tensor_type=A )
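

# Added sketch chaining the module-level transforms imported at the top of this
# file in the order the processor applies them, using its default sizes; the
# random image and the argument forms mirror the method calls above.
if __name__ == "__main__":
    image = np.random.randint(0, 255, (300, 400, 3), dtype=np.uint8)
    image = resize(image, size=(256, 256), resample=PILImageResampling.BICUBIC)
    image = center_crop(image, size=(224, 224))
    image = rescale(image, scale=1 / 255)
    image = normalize(image, mean=IMAGENET_STANDARD_MEAN, std=IMAGENET_STANDARD_STD)
    print(image.shape)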
| 684 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['feature_extraction_beit'] = ['BeitFeatureExtractor']
_import_structure['image_processing_beit'] = ['BeitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_beit'] = [
'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BeitForImageClassification',
'BeitForMaskedImageModeling',
'BeitForSemanticSegmentation',
'BeitModel',
'BeitPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_flax_beit'] = [
'FlaxBeitForImageClassification',
'FlaxBeitForMaskedImageModeling',
'FlaxBeitModel',
'FlaxBeitPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 684 |
import inspect
import unittest


class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
| 684 | 1 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase = 16 ) -> List[Any]:
snake_case : List[str] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
snake_case : str = load_dataset("""glue""" ,"""mrpc""" )
def tokenize_function(lowercase ):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
snake_case : Dict = datasets.map(
lowercase ,batched=lowercase ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case : Union[str, Any] = tokenized_datasets.rename_column("""label""" ,"""labels""" )
def collate_fn(lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
snake_case : Any = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
snake_case : Dict = 16
elif accelerator.mixed_precision != "no":
snake_case : Optional[int] = 8
else:
snake_case : List[Any] = None
return tokenizer.pad(
lowercase ,padding="""longest""" ,max_length=lowercase ,pad_to_multiple_of=lowercase ,return_tensors="""pt""" ,)
# Instantiate dataloaders.
snake_case : Tuple = DataLoader(
tokenized_datasets["""train"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase )
snake_case : str = DataLoader(
tokenized_datasets["""validation"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
get_dataloaders = mocked_dataloaders  # noqa: F811
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> Dict:
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , None ) == "1":
snake_case : str = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
snake_case : List[Any] = Accelerator(
cpu=args.cpu ,mixed_precision=args.mixed_precision ,log_with="""all""" ,project_dir=args.project_dir )
else:
snake_case : int = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case : List[str] = config["""lr"""]
snake_case : Any = int(config["""num_epochs"""] )
snake_case : Optional[int] = int(config["""seed"""] )
snake_case : List[Any] = int(config["""batch_size"""] )
set_seed(lowercase )
snake_case , snake_case : Union[str, Any] = get_dataloaders(lowercase ,lowercase )
snake_case : Dict = evaluate.load("""glue""" ,"""mrpc""" )
# If the batch size is too big we use gradient accumulation
snake_case : Union[str, Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
snake_case : Optional[int] = batch_size // MAX_GPU_BATCH_SIZE
snake_case : Any = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case : Optional[Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" ,return_dict=lowercase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
snake_case : Tuple = model.to(accelerator.device )
# Instantiate optimizer
snake_case : str = AdamW(params=model.parameters() ,lr=lowercase )
# Instantiate scheduler
snake_case : Optional[Any] = get_linear_schedule_with_warmup(
optimizer=lowercase ,num_warmup_steps=100 ,num_training_steps=(len(lowercase ) * num_epochs) // gradient_accumulation_steps ,)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case , snake_case , snake_case , snake_case , snake_case : Union[str, Any] = accelerator.prepare(
lowercase ,lowercase ,lowercase ,lowercase ,lowercase )
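# after prepare(), the model, optimizer, dataloaders and scheduler are all wrapped for the selected device / distributed setup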
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
snake_case : str = os.path.split(lowercase )[-1].split(""".""" )[0]
accelerator.init_trackers(lowercase ,lowercase )
# Now we train the model
for epoch in range(lowercase ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
snake_case : Optional[Any] = 0
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
snake_case : Optional[Any] = model(**lowercase )
snake_case : str = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
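# scale the loss so gradients accumulated over several steps average to the full-batch gradient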
snake_case : Tuple = loss / gradient_accumulation_steps
accelerator.backward(lowercase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
snake_case : Any = model(**lowercase )
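# take the highest-scoring class as the prediction for each example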
snake_case : Optional[Any] = outputs.logits.argmax(dim=-1 )
snake_case , snake_case : Any = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=lowercase ,references=lowercase ,)
snake_case : Dict = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" ,lowercase )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
"""accuracy""": eval_metric["""accuracy"""],
"""f1""": eval_metric["""f1"""],
"""train_loss""": total_loss.item() / len(lowercase ),
"""epoch""": epoch,
} ,step=lowercase ,)
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]:
snake_case : Tuple = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" ,type=lowercase ,default=lowercase ,choices=["""no""", """fp16""", """bf16""", """fp8"""] ,help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" ,)
parser.add_argument("""--cpu""" ,action="""store_true""" ,help="""If passed, will train on the CPU.""" )
parser.add_argument(
"""--with_tracking""" ,action="""store_true""" ,help="""Whether to load in all available experiment trackers from the environment and use them for logging.""" ,)
parser.add_argument(
"""--project_dir""" ,type=lowercase ,default="""logs""" ,help="""Location on where to store experiment tracking logs` and relevent project information""" ,)
snake_case : List[str] = parser.parse_args()
snake_case : Optional[Any] = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(lowercase ,lowercase )
if __name__ == "__main__":
main()
| 684 |
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
lowerCamelCase : Union[str, Any] = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
lowerCamelCase : List[Any] = 'main'
# Default branch name
lowerCamelCase : Tuple = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'
# One particular commit (not the top of `main`)
lowerCamelCase : List[Any] = 'aaaaaaa'
# This commit does not exist, so we should 404.
lowerCamelCase : List[Any] = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684'
# Sha-1 of config.json on the top of `main`, for checking purposes
lowerCamelCase : int = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
print("""Welcome!""" )
yield
print("""Bye!""" )
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
print("""Bonjour!""" )
yield
print("""Au revoir!""" )
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> int:
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec("""transformers""" ) is not None
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> Optional[Any]:
with ContextManagers([] ):
print("""Transformers are awesome!""" )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> int:
with ContextManagers([context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> int:
with ContextManagers([context_fr(), context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" )
@require_torch
def UpperCAmelCase ( self ) -> Optional[Any]:
self.assertEqual(find_labels(BertForSequenceClassification ) , ["""labels"""] )
self.assertEqual(find_labels(BertForPreTraining ) , ["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(BertForQuestionAnswering ) , ["""start_positions""", """end_positions"""] )
class __lowercase (BertForSequenceClassification ):
"""simple docstring"""
pass
self.assertEqual(find_labels(__lowercase ) , ["""labels"""] )
@require_tf
def UpperCAmelCase ( self ) -> str:
self.assertEqual(find_labels(TFBertForSequenceClassification ) , ["""labels"""] )
self.assertEqual(find_labels(TFBertForPreTraining ) , ["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(TFBertForQuestionAnswering ) , ["""start_positions""", """end_positions"""] )
class __lowercase (TFBertForSequenceClassification ):
"""simple docstring"""
pass
self.assertEqual(find_labels(__lowercase ) , ["""labels"""] )
@require_flax
def UpperCAmelCase ( self ) -> Any:
# Flax models don't have labels
self.assertEqual(find_labels(FlaxBertForSequenceClassification ) , [] )
self.assertEqual(find_labels(FlaxBertForPreTraining ) , [] )
self.assertEqual(find_labels(FlaxBertForQuestionAnswering ) , [] )
class __lowercase (FlaxBertForSequenceClassification ):
"""simple docstring"""
pass
self.assertEqual(find_labels(__lowercase ) , [] )
| 684 | 1 |
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> None:
snake_case , snake_case : List[str] = analyze_text(lowercase )
snake_case : Optional[int] = list(""" """ + ascii_lowercase )
# what is our total sum of probabilities.
snake_case : int = sum(single_char_strings.values() )
# one length string
snake_case : Optional[Any] = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
snake_case : Dict = single_char_strings[ch]
snake_case : Tuple = my_str / all_sum
my_fir_sum += prob * math.log2(prob ) # entropy formula.
# print entropy
print(f"""{round(-1 * my_fir_sum ):.1f}""" )
# two len string
snake_case : Dict = sum(two_char_strings.values() )
snake_case : List[Any] = 0
# for each alpha (two in size) calculate entropy.
for cha in my_alphas:
for chb in my_alphas:
snake_case : List[Any] = cha + chb
if sequence in two_char_strings:
snake_case : Tuple = two_char_strings[sequence]
snake_case : str = int(my_str ) / all_sum
my_sec_sum += prob * math.log2(prob )
# print second entropy
print(f"""{round(-1 * my_sec_sum ):.1f}""" )
# print the difference between them
print(f"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> tuple[dict, dict]:
snake_case : int = Counter() # type: ignore
snake_case : Union[str, Any] = Counter() # type: ignore
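# count single characters and adjacent two-character sequences over the whole text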
single_char_strings[text[-1]] += 1
# treat the text as if preceded by a space so the first character also forms a pair.
two_char_strings[" " + text[0]] += 1
for i in range(0 ,len(lowercase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 684 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase : Dict = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """audio-spectrogram-transformer"""
def __init__( self , A=7_6_8 , A=1_2 , A=1_2 , A=3_0_7_2 , A="gelu" , A=0.0 , A=0.0 , A=0.02 , A=1e-1_2 , A=1_6 , A=True , A=1_0 , A=1_0 , A=1_0_2_4 , A=1_2_8 , **A , ) -> int:
super().__init__(**A )
snake_case : Any = hidden_size
snake_case : Tuple = num_hidden_layers
snake_case : Any = num_attention_heads
snake_case : Dict = intermediate_size
snake_case : int = hidden_act
snake_case : int = hidden_dropout_prob
snake_case : Tuple = attention_probs_dropout_prob
snake_case : int = initializer_range
snake_case : int = layer_norm_eps
snake_case : Any = patch_size
snake_case : List[Any] = qkv_bias
snake_case : int = frequency_stride
snake_case : Any = time_stride
snake_case : Union[str, Any] = max_length
snake_case : Any = num_mel_bins
| 684 | 1 |
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
snake_case : Optional[Any] = hex_num.strip()
if not hex_num:
raise ValueError("""No value was passed to the function""" )
snake_case : Any = hex_num[0] == """-"""
if is_negative:
snake_case : int = hex_num[1:]
try:
snake_case : List[Any] = int(lowercase ,16 )
except ValueError:
raise ValueError("""Invalid value was passed to the function""" )
snake_case : Dict = """"""
while int_num > 0:
snake_case : Dict = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(("""-""" + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 684 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCamelCase : Any = logging.get_logger(__name__)
class __lowercase (enum.Enum ):
"""simple docstring"""
_snake_case = 0
_snake_case = 1
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """generated"""
def __init__( self , *A , **A ) -> Optional[Any]:
super().__init__(*A , **A )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def UpperCAmelCase ( self , A=None , A=None , A=None , A=None , A=None , A=None , **A , ) -> Optional[int]:
snake_case : Tuple = {}
if truncation is not None:
snake_case : Union[str, Any] = truncation
snake_case : Dict = generate_kwargs
snake_case : int = {}
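# map the legacy return_tensors flag onto the ReturnType enum when no explicit return_type is given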
if return_tensors is not None and return_type is None:
snake_case : List[Any] = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
snake_case : List[str] = return_type
if clean_up_tokenization_spaces is not None:
snake_case : int = clean_up_tokenization_spaces
if stop_sequence is not None:
snake_case : Tuple = self.tokenizer.encode(A , add_special_tokens=A )
if len(A ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
snake_case : List[str] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCAmelCase ( self , A , A , A ) -> Union[str, Any]:
return True
def UpperCAmelCase ( self , *A , A ) -> Tuple:
snake_case : Union[str, Any] = self.model.config.prefix if self.model.config.prefix is not None else """"""
if isinstance(args[0] , list ):
if self.tokenizer.pad_token_id is None:
raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
snake_case : Union[str, Any] = ([prefix + arg for arg in args[0]],)
snake_case : List[Any] = True
elif isinstance(args[0] , str ):
snake_case : str = (prefix + args[0],)
snake_case : str = False
else:
raise ValueError(
f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
snake_case : Optional[Any] = self.tokenizer(*A , padding=A , truncation=A , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self , *A , **A ) -> Union[str, Any]:
snake_case : Tuple = super().__call__(*A , **A )
if (
isinstance(args[0] , list )
and all(isinstance(el , str ) for el in args[0] )
and all(len(res ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def UpperCAmelCase ( self , A , A=TruncationStrategy.DO_NOT_TRUNCATE , **A ) -> str:
snake_case : Optional[Any] = self._parse_and_tokenize(A , truncation=A , **A )
return inputs
def UpperCAmelCase ( self , A , **A ) -> Tuple:
if self.framework == "pt":
snake_case , snake_case : List[str] = model_inputs["""input_ids"""].shape
elif self.framework == "tf":
snake_case , snake_case : Optional[Any] = tf.shape(model_inputs["""input_ids"""] ).numpy()
snake_case : Dict = generate_kwargs.get("""min_length""" , self.model.config.min_length )
snake_case : str = generate_kwargs.get("""max_length""" , self.model.config.max_length )
self.check_inputs(input_length , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""] )
snake_case : List[str] = self.model.generate(**A , **A )
snake_case : Dict = output_ids.shape[0]
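# regroup the generated sequences per input sample: (batch, num_return_sequences, *sequence_dims)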
if self.framework == "pt":
snake_case : List[Any] = output_ids.reshape(A , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
snake_case : Any = tf.reshape(A , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def UpperCAmelCase ( self , A , A=ReturnType.TEXT , A=False ) -> Union[str, Any]:
snake_case : Tuple = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
snake_case : Dict = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
snake_case : int = {
f"""{self.return_name}_text""": self.tokenizer.decode(
A , skip_special_tokens=A , clean_up_tokenization_spaces=A , )
}
records.append(A )
return records
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """summary"""
def __call__( self , *A , **A ) -> str:
return super().__call__(*A , **A )
def UpperCAmelCase ( self , A , A , A ) -> bool:
if max_length < min_length:
logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
"""a summarization task, where outputs shorter than the input are typically wanted, you might """
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """translation"""
def UpperCAmelCase ( self , A , A , A ) -> Union[str, Any]:
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
"""increasing your max_length manually, e.g. translator('...', max_length=400)""" )
return True
def UpperCAmelCase ( self , *A , A=TruncationStrategy.DO_NOT_TRUNCATE , A=None , A=None ) -> Optional[int]:
if getattr(self.tokenizer , """_build_translation_inputs""" , A ):
return self.tokenizer._build_translation_inputs(
*A , return_tensors=self.framework , truncation=A , src_lang=A , tgt_lang=A )
else:
return super()._parse_and_tokenize(*A , truncation=A )
def UpperCAmelCase ( self , A=None , A=None , **A ) -> Union[str, Any]:
snake_case , snake_case , snake_case : str = super()._sanitize_parameters(**A )
if src_lang is not None:
snake_case : Tuple = src_lang
if tgt_lang is not None:
snake_case : str = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
snake_case : Union[str, Any] = kwargs.get("""task""" , self.task )
snake_case : Any = task.split("""_""" )
if task and len(items ) == 4:
# translation, XX, to YY
snake_case : Optional[Any] = items[1]
snake_case : Dict = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self , *A , **A ) -> str:
return super().__call__(*A , **A )
| 684 | 1 |
import heapq
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> set[int]:
snake_case : list[list] = []
# for each node and its adjacency list, push the node together with its rank onto the queue
# using the heapq module, the queue is filled like a priority queue
# heapq implements a min-priority queue, so we store -1 * len(v) to get max-priority behaviour
for key, value in graph.items():
# O(log(n))
heapq.heappush(lowercase ,[-1 * len(lowercase ), (key, value)] )
# chosen_vertices = set of chosen vertices
snake_case : List[Any] = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
snake_case : Union[str, Any] = heapq.heappop(lowercase )[1][0]
chosen_vertices.add(lowercase )
# Remove all arcs adjacent to argmax
for elem in queue:
# if the vertex has no remaining adjacent nodes, skip it
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
snake_case : Any = elem[1][1].index(lowercase )
del elem[1][1][index]
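# the rank is stored negated, so adding 1 moves it toward zero (one fewer uncovered edge)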
elem[0] += 1
# re-order the queue
heapq.heapify(lowercase )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase : Dict = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 684 |
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> str:
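# strip comments and blank lines so purely cosmetic edits do not change the resulting hash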
snake_case : int = []
for line in lines:
snake_case : Dict = re.sub(R"""#.*""" ,"""""" ,lowercase ) # remove comments
if line:
filtered_lines.append(lowercase )
snake_case : Optional[int] = """\n""".join(lowercase )
# Make a hash from all this code
snake_case : List[str] = full_str.encode("""utf-8""" )
return sha256(full_bytes ).hexdigest()
# get importable module names and hash for caching
lowerCamelCase : Any = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
lowerCamelCase : Optional[int] = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
lowerCamelCase : Tuple = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
lowerCamelCase : Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 684 | 1 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
lowerCamelCase : Optional[Any] = 'CompVis/stable-diffusion-v1-1'
lowerCamelCase : Optional[Any] = 'CompVis/stable-diffusion-v1-2'
lowerCamelCase : Any = 'CompVis/stable-diffusion-v1-3'
lowerCamelCase : List[str] = 'CompVis/stable-diffusion-v1-4'
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
def __init__( self , A , A , A , A , A , A , A , A = True , ) -> Any:
super().__init__()
snake_case : Tuple = StableDiffusionPipeline.from_pretrained(A )
snake_case : Optional[Any] = StableDiffusionPipeline.from_pretrained(A )
snake_case : List[Any] = StableDiffusionPipeline.from_pretrained(A )
snake_case : Dict = StableDiffusionPipeline(
vae=A , text_encoder=A , tokenizer=A , unet=A , scheduler=A , safety_checker=A , feature_extractor=A , requires_safety_checker=A , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def UpperCAmelCase ( self ) -> Dict[str, Any]:
return {k: getattr(self , k ) for k in self.config.keys() if not k.startswith("""_""" )}
def UpperCAmelCase ( self , A = "auto" ) -> Any:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
snake_case : Tuple = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A )
def UpperCAmelCase ( self ) -> Any:
self.enable_attention_slicing(None )
@torch.no_grad()
def UpperCAmelCase ( self , A , A = 5_1_2 , A = 5_1_2 , A = 5_0 , A = 7.5 , A = None , A = 1 , A = 0.0 , A = None , A = None , A = "pil" , A = True , A = None , A = 1 , **A , ) -> int:
return self.pipea(
prompt=A , height=A , width=A , num_inference_steps=A , guidance_scale=A , negative_prompt=A , num_images_per_prompt=A , eta=A , generator=A , latents=A , output_type=A , return_dict=A , callback=A , callback_steps=A , **A , )
@torch.no_grad()
def UpperCAmelCase ( self , A , A = 5_1_2 , A = 5_1_2 , A = 5_0 , A = 7.5 , A = None , A = 1 , A = 0.0 , A = None , A = None , A = "pil" , A = True , A = None , A = 1 , **A , ) -> Dict:
return self.pipea(
prompt=A , height=A , width=A , num_inference_steps=A , guidance_scale=A , negative_prompt=A , num_images_per_prompt=A , eta=A , generator=A , latents=A , output_type=A , return_dict=A , callback=A , callback_steps=A , **A , )
@torch.no_grad()
def UpperCAmelCase ( self , A , A = 5_1_2 , A = 5_1_2 , A = 5_0 , A = 7.5 , A = None , A = 1 , A = 0.0 , A = None , A = None , A = "pil" , A = True , A = None , A = 1 , **A , ) -> List[str]:
return self.pipea(
prompt=A , height=A , width=A , num_inference_steps=A , guidance_scale=A , negative_prompt=A , num_images_per_prompt=A , eta=A , generator=A , latents=A , output_type=A , return_dict=A , callback=A , callback_steps=A , **A , )
@torch.no_grad()
def UpperCAmelCase ( self , A , A = 5_1_2 , A = 5_1_2 , A = 5_0 , A = 7.5 , A = None , A = 1 , A = 0.0 , A = None , A = None , A = "pil" , A = True , A = None , A = 1 , **A , ) -> Dict:
return self.pipea(
prompt=A , height=A , width=A , num_inference_steps=A , guidance_scale=A , negative_prompt=A , num_images_per_prompt=A , eta=A , generator=A , latents=A , output_type=A , return_dict=A , callback=A , callback_steps=A , **A , )
@torch.no_grad()
def UpperCAmelCase ( self , A , A = 5_1_2 , A = 5_1_2 , A = 5_0 , A = 7.5 , A = None , A = 1 , A = 0.0 , A = None , A = None , A = "pil" , A = True , A = None , A = 1 , **A , ) -> int:
snake_case : List[Any] = """cuda""" if torch.cuda.is_available() else """cpu"""
self.to(A )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
snake_case : List[Any] = self.textaimg_sda_a(
prompt=A , height=A , width=A , num_inference_steps=A , guidance_scale=A , negative_prompt=A , num_images_per_prompt=A , eta=A , generator=A , latents=A , output_type=A , return_dict=A , callback=A , callback_steps=A , **A , )
# Get first result from Stable Diffusion Checkpoint v1.2
snake_case : List[Any] = self.textaimg_sda_a(
prompt=A , height=A , width=A , num_inference_steps=A , guidance_scale=A , negative_prompt=A , num_images_per_prompt=A , eta=A , generator=A , latents=A , output_type=A , return_dict=A , callback=A , callback_steps=A , **A , )
# Get first result from Stable Diffusion Checkpoint v1.3
snake_case : Dict = self.textaimg_sda_a(
prompt=A , height=A , width=A , num_inference_steps=A , guidance_scale=A , negative_prompt=A , num_images_per_prompt=A , eta=A , generator=A , latents=A , output_type=A , return_dict=A , callback=A , callback_steps=A , **A , )
# Get first result from Stable Diffusion Checkpoint v1.4
snake_case : List[str] = self.textaimg_sda_a(
prompt=A , height=A , width=A , num_inference_steps=A , guidance_scale=A , negative_prompt=A , num_images_per_prompt=A , eta=A , generator=A , latents=A , output_type=A , return_dict=A , callback=A , callback_steps=A , **A , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 684 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> Tuple:
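# build the model skeleton from the JSON config, copy in the TF checkpoint weights, then dump a PyTorch state dict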
# Initialise PyTorch model
snake_case : int = RemBertConfig.from_json_file(lowercase )
print("""Building PyTorch model from configuration: {}""".format(str(lowercase ) ) )
snake_case : Tuple = RemBertModel(lowercase )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(lowercase ,lowercase ,lowercase )
# Save pytorch-model
print("""Save PyTorch model to {}""".format(lowercase ) )
torch.save(model.state_dict() ,lowercase )
if __name__ == "__main__":
lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCamelCase : Dict = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 684 | 1 |
from ..utils import DummyObject, requires_backends
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[str]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Any:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[int]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Union[str, Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Any:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Dict:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
| 684 |
from ..utils import DummyObject, requires_backends
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[str]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Any:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[int]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Union[str, Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Any:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Dict:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
| 684 | 1 |
from ..utils import DummyObject, requires_backends
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> Any:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[str]:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> Optional[int]:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> Union[str, Any]:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Any:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> Optional[Any]:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> Dict:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Any:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> int:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> Any:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Any:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> Any:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> List[Any]:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[str]:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> Union[str, Any]:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Any:
requires_backends(cls , ["""torch"""] )
def SCREAMING_SNAKE_CASE__ ( *lowercase ,**lowercase ) -> Any:
requires_backends(lowercase ,["""torch"""] )
def SCREAMING_SNAKE_CASE__ ( *lowercase ,**lowercase ) -> List[str]:
requires_backends(lowercase ,["""torch"""] )
def SCREAMING_SNAKE_CASE__ ( *lowercase ,**lowercase ) -> Any:
requires_backends(lowercase ,["""torch"""] )
def SCREAMING_SNAKE_CASE__ ( *lowercase ,**lowercase ) -> Any:
requires_backends(lowercase ,["""torch"""] )
def SCREAMING_SNAKE_CASE__ ( *lowercase ,**lowercase ) -> Optional[int]:
requires_backends(lowercase ,["""torch"""] )
def SCREAMING_SNAKE_CASE__ ( *lowercase ,**lowercase ) -> Dict:
requires_backends(lowercase ,["""torch"""] )
def SCREAMING_SNAKE_CASE__ ( *lowercase ,**lowercase ) -> Any:
requires_backends(lowercase ,["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> Optional[int]:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[str]:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> Optional[Any]:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Any:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[str]:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[str]:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> List[str]:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[str]:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> List[str]:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> Any:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> Dict:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> Any:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> Dict:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[str]:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> Union[str, Any]:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> Union[str, Any]:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> int:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> Any:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Any:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> List[Any]:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> Optional[int]:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> Optional[Any]:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Any:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> Any:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> Union[str, Any]:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> int:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> Optional[int]:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> Dict:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Any:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Any:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> List[Any]:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> int:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> int:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> Optional[Any]:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> int:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> Any:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> int:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> int:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[str]:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> List[str]:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> List[str]:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Any:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> List[str]:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""torch"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""torch"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""torch"""] )
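# The classes above all follow the transformers/diffusers "dummy object" pattern:
# the constructor and the two classmethods (typically from_config and
# from_pretrained) immediately call requires_backends, so touching any of these
# classes without torch installed raises an ImportError naming the missing
# backend. A minimal sketch of the pattern (illustrative names, assuming the
# helpers live in transformers.utils as in current releases):
#
#     from transformers.utils import DummyObject, requires_backends
#
#     class SomeTorchModel(metaclass=DummyObject):
#         _backends = ["torch"]
#
#         def __init__(self, *args, **kwargs):
#             requires_backends(self, ["torch"])  # raises if torch is absent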
| 684 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowerCamelCase : List[str] = 3
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
print("""Generating primitive root of p""" )
while True:
snake_case : Optional[int] = random.randrange(3 ,lowercase )
if pow(lowercase ,2 ,lowercase ) == 1:
continue
if pow(lowercase ,lowercase ,lowercase ) == 1:
continue
return g
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
print("""Generating prime p...""" )
snake_case : Optional[int] = rabin_miller.generate_large_prime(lowercase ) # select large prime number.
snake_case : Optional[int] = primitive_root(lowercase ) # one primitive root on modulo p.
snake_case : Optional[Any] = random.randrange(3 ,lowercase ) # private_key -> have to be greater than 2 for safety.
snake_case : Tuple = cryptomath.find_mod_inverse(pow(lowercase ,lowercase ,lowercase ) ,lowercase )
snake_case : str = (key_size, e_a, e_a, p)
snake_case : Optional[Any] = (key_size, d)
return public_key, private_key
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> None:
if os.path.exists(f"""{name}_pubkey.txt""" ) or os.path.exists(f"""{name}_privkey.txt""" ):
print("""\nWARNING:""" )
print(
f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
"""Use a different name or delete these files and re-run this program.""" )
sys.exit()
snake_case , snake_case : Optional[Any] = generate_key(lowercase )
print(f"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(f"""{name}_pubkey.txt""" ,"""w""" ) as fo:
fo.write(f"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" )
print(f"""Writing private key to file {name}_privkey.txt...""" )
with open(f"""{name}_privkey.txt""" ,"""w""" ) as fo:
fo.write(f"""{private_key[0]},{private_key[1]}""" )
def SCREAMING_SNAKE_CASE__ ( ) -> None:
print("""Making key files...""" )
make_key_files("""elgamal""" ,2048 )
print("""Key files generation successful""" )
if __name__ == "__main__":
main()
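# Usage sketch (hypothetical 32-bit key size for speed; the script itself uses
# 2048 bits). The third component of the public key is the modular inverse of
# g**d mod p, so keys returned by generate_key satisfy this invariant:
#
#     public_key, private_key = generate_key(32)
#     _, g, g_pow_d_inv, p = public_key   # illustrative names, not from the code
#     _, d = private_key
#     assert (pow(g, d, p) * g_pow_d_inv) % p == 1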
| 684 | 1 |
import string
import numpy
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> int:
return b if a == 0 else greatest_common_divisor(b % a ,lowercase )
class __lowercase :
"""simple docstring"""
_snake_case = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
_snake_case = numpy.vectorize(lambda UpperCamelCase__ : x % 36 )
_snake_case = numpy.vectorize(UpperCamelCase__ )
def __init__( self , A ) -> None:
snake_case : Any = self.modulus(A ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
snake_case : str = encrypt_key.shape[0]
def UpperCAmelCase ( self , A ) -> int:
return self.key_string.index(A )
def UpperCAmelCase ( self , A ) -> str:
return self.key_string[round(A )]
def UpperCAmelCase ( self ) -> None:
snake_case : Optional[int] = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
snake_case : int = det % len(self.key_string )
snake_case : Optional[Any] = len(self.key_string )
if greatest_common_divisor(A , len(self.key_string ) ) != 1:
snake_case : Dict = (
f"""determinant modular {req_l} of encryption key({det}) """
f"""is not co prime w.r.t {req_l}.\nTry another key."""
)
raise ValueError(A )
def UpperCAmelCase ( self , A ) -> str:
snake_case : Optional[Any] = [char for char in text.upper() if char in self.key_string]
snake_case : Optional[int] = chars[-1]
while len(A ) % self.break_key != 0:
chars.append(A )
return "".join(A )
def UpperCAmelCase ( self , A ) -> str:
snake_case : List[str] = self.process_text(text.upper() )
snake_case : str = """"""
for i in range(0 , len(A ) - self.break_key + 1 , self.break_key ):
snake_case : Union[str, Any] = text[i : i + self.break_key]
snake_case : int = [self.replace_letters(A ) for char in batch]
snake_case : Optional[int] = numpy.array([vec] ).T
snake_case : str = self.modulus(self.encrypt_key.dot(A ) ).T.tolist()[
0
]
snake_case : Optional[int] = """""".join(
self.replace_digits(A ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def UpperCAmelCase ( self ) -> numpy.ndarray:
snake_case : Optional[Any] = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
snake_case : int = det % len(self.key_string )
snake_case : str = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
snake_case : Dict = i
break
snake_case : List[str] = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(A ) )
def UpperCAmelCase ( self , A ) -> str:
snake_case : Optional[Any] = self.make_decrypt_key()
snake_case : int = self.process_text(text.upper() )
snake_case : Optional[Any] = """"""
for i in range(0 , len(A ) - self.break_key + 1 , self.break_key ):
snake_case : str = text[i : i + self.break_key]
snake_case : Any = [self.replace_letters(A ) for char in batch]
snake_case : Optional[Any] = numpy.array([vec] ).T
snake_case : Optional[Any] = self.modulus(decrypt_key.dot(A ) ).T.tolist()[0]
snake_case : List[Any] = """""".join(
self.replace_digits(A ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
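# Round-trip sketch (hypothetical key; det([[2, 5], [1, 6]]) = 7 is coprime to 36,
# so the determinant check accepts it):
#
#     hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
#     hc.decrypt(hc.encrypt("HELLO"))  # returns "HELLOO": the input is padded to
#                                      # the 2-character block size with its last char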
def SCREAMING_SNAKE_CASE__ ( ) -> None:
snake_case : Optional[Any] = int(input("""Enter the order of the encryption key: """ ) )
snake_case : Optional[Any] = []
print("""Enter each row of the encryption key with space separated integers""" )
for _ in range(lowercase ):
snake_case : str = [int(lowercase ) for x in input().split()]
hill_matrix.append(lowercase )
snake_case : Tuple = HillCipher(numpy.array(lowercase ) )
print("""Would you like to encrypt or decrypt some text? (1 or 2)""" )
snake_case : Tuple = input("""\n1. Encrypt\n2. Decrypt\n""" )
if option == "1":
snake_case : List[Any] = input("""What text would you like to encrypt?: """ )
print("""Your encrypted text is:""" )
print(hc.encrypt(lowercase ) )
elif option == "2":
snake_case : Optional[int] = input("""What text would you like to decrypt?: """ )
print("""Your decrypted text is:""" )
print(hc.decrypt(lowercase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 684 |
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> int:
if exponent == 1:
return base
if exponent % 2 == 0:
snake_case : Dict = _modexpt(lowercase ,exponent // 2 ,lowercase ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(lowercase ,exponent - 1 ,lowercase )) % modulo_value
def SCREAMING_SNAKE_CASE__ ( lowercase = 1777 ,lowercase = 1855 ,lowercase = 8 ) -> int:
snake_case : int = base
for _ in range(1 ,lowercase ):
snake_case : List[str] = _modexpt(lowercase ,lowercase ,10**digits )
return result
if __name__ == "__main__":
print(f"""{solution() = }""")
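# Sanity examples for the helpers above:
#   _modexpt(3, 5, 7) == 3**5 % 7 == 5
#   solution(base=3, height=2, digits=8) == 3**3 % 10**8 == 27, since the loop
#   builds the power tower base**base**... (height terms) modulo 10**digits.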
| 684 | 1 |
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> Any:
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(lowercase ,int(b / 2 ) ) * actual_power(lowercase ,int(b / 2 ) )
else:
return a * actual_power(lowercase ,int(b / 2 ) ) * actual_power(lowercase ,int(b / 2 ) )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> float:
if b < 0:
return 1 / actual_power(lowercase ,lowercase )
return actual_power(lowercase ,lowercase )
if __name__ == "__main__":
print(power(-2, -3))
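# Note: int(b / 2) truncates toward zero, so the recursion also terminates for
# negative exponents, and power(-2, -3) == 1 / actual_power(-2, -3) == 1 / -8
# == -0.125, which is what the print above shows.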
| 684 |
from itertools import product
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> list[int]:
snake_case : Tuple = sides_number
snake_case : List[str] = max_face_number * dice_number
snake_case : Any = [0] * (max_total + 1)
snake_case : int = 1
snake_case : List[str] = range(lowercase ,max_face_number + 1 )
for dice_numbers in product(lowercase ,repeat=lowercase ):
snake_case : Any = sum(lowercase )
totals_frequencies[total] += 1
return totals_frequencies
def SCREAMING_SNAKE_CASE__ ( ) -> float:
snake_case : List[str] = total_frequency_distribution(
sides_number=4 ,dice_number=9 )
snake_case : str = total_frequency_distribution(
sides_number=6 ,dice_number=6 )
snake_case : Optional[int] = 0
snake_case : List[str] = 9
snake_case : Union[str, Any] = 4 * 9
snake_case : Dict = 6
for peter_total in range(lowercase ,max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
snake_case : str = (4**9) * (6**6)
snake_case : int = peter_wins_count / total_games_number
snake_case : Optional[int] = round(lowercase ,ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(f"""{solution() = }""")
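# Quick check of the frequency helper (hypothetical call): with two six-sided
# dice, total_frequency_distribution(sides_number=6, dice_number=2)[7] == 6,
# the six orderings that sum to 7. The value returned by solution() is Peter's
# probability of strictly out-rolling Colin, rounded to 7 decimal places.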
| 684 | 1 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Optional[Any]: # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]:
with parallel_backend("""spark""" ):
assert ParallelBackendConfig.backend_name == "spark"
snake_case : Optional[Any] = [1, 2, 3]
with pytest.raises(lowercase ):
with parallel_backend("""unsupported backend""" ):
map_nested(lowercase ,lowercase ,num_proc=2 )
with pytest.raises(lowercase ):
with parallel_backend("""unsupported backend""" ):
map_nested(lowercase ,lowercase ,num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("""num_proc""" ,[2, -1] )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> str:
snake_case : Any = [1, 2]
snake_case : List[Any] = {"""a""": 1, """b""": 2}
snake_case : Tuple = {"""a""": [1, 2], """b""": [3, 4]}
snake_case : Optional[Any] = {"""a""": {"""1""": 1}, """b""": 2}
snake_case : Union[str, Any] = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
snake_case : int = [2, 3]
snake_case : Union[str, Any] = {"""a""": 2, """b""": 3}
snake_case : Union[str, Any] = {"""a""": [2, 3], """b""": [4, 5]}
snake_case : Optional[Any] = {"""a""": {"""1""": 2}, """b""": 3}
snake_case : List[Any] = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
with parallel_backend("""spark""" ):
assert map_nested(lowercase ,lowercase ,num_proc=lowercase ) == expected_map_nested_sa
assert map_nested(lowercase ,lowercase ,num_proc=lowercase ) == expected_map_nested_sa
assert map_nested(lowercase ,lowercase ,num_proc=lowercase ) == expected_map_nested_sa
assert map_nested(lowercase ,lowercase ,num_proc=lowercase ) == expected_map_nested_sa
assert map_nested(lowercase ,lowercase ,num_proc=lowercase ) == expected_map_nested_sa
| 684 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 684 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 684 |
import os
def SCREAMING_SNAKE_CASE__ ( ) -> Dict:
with open(os.path.dirname(__file__ ) + """/grid.txt""" ) as f: # __file__ (the name here was undefined) locates grid.txt next to this script
snake_case : Tuple = [] # noqa: E741
for _ in range(20 ):
l.append([int(lowercase ) for x in f.readline().split()] )
snake_case : Optional[Any] = 0
# right
for i in range(20 ):
for j in range(17 ):
snake_case : List[Any] = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
snake_case : Tuple = temp
# down
for i in range(17 ):
for j in range(20 ):
snake_case : Any = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
snake_case : str = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
snake_case : int = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
snake_case : int = temp
# diagonal 2
for i in range(17 ):
for j in range(3 ,20 ):
snake_case : Any = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
snake_case : Any = temp
return maximum
if __name__ == "__main__":
print(solution())
| 684 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Dict:
snake_case : str = SwinvaConfig()
snake_case : List[str] = swinva_name.split("""_""" )
snake_case : Optional[Any] = name_split[1]
if "to" in name_split[3]:
snake_case : Optional[Any] = int(name_split[3][-3:] )
else:
snake_case : List[Any] = int(name_split[3] )
if "to" in name_split[2]:
snake_case : Any = int(name_split[2][-2:] )
else:
snake_case : int = int(name_split[2][6:] )
if model_size == "tiny":
snake_case : int = 96
snake_case : Optional[int] = (2, 2, 6, 2)
snake_case : str = (3, 6, 12, 24)
elif model_size == "small":
snake_case : Optional[int] = 96
snake_case : Optional[Any] = (2, 2, 18, 2)
snake_case : Union[str, Any] = (3, 6, 12, 24)
elif model_size == "base":
snake_case : str = 128
snake_case : Optional[int] = (2, 2, 18, 2)
snake_case : Dict = (4, 8, 16, 32)
else:
snake_case : Dict = 192
snake_case : List[str] = (2, 2, 18, 2)
snake_case : Optional[int] = (6, 12, 24, 48)
if "to" in swinva_name:
snake_case : int = (12, 12, 12, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
snake_case : List[str] = 21841
snake_case : List[Any] = """huggingface/label-files"""
snake_case : Any = """imagenet-22k-id2label.json"""
snake_case : Any = json.load(open(hf_hub_download(lowercase ,lowercase ,repo_type="""dataset""" ) ,"""r""" ) )
snake_case : Union[str, Any] = {int(lowercase ): v for k, v in idalabel.items()}
snake_case : Tuple = idalabel
snake_case : Optional[Any] = {v: k for k, v in idalabel.items()}
else:
snake_case : List[Any] = 1000
snake_case : Optional[int] = """huggingface/label-files"""
snake_case : Dict = """imagenet-1k-id2label.json"""
snake_case : List[str] = json.load(open(hf_hub_download(lowercase ,lowercase ,repo_type="""dataset""" ) ,"""r""" ) )
snake_case : str = {int(lowercase ): v for k, v in idalabel.items()}
snake_case : Optional[Any] = idalabel
snake_case : Union[str, Any] = {v: k for k, v in idalabel.items()}
snake_case : Tuple = img_size
snake_case : Any = num_classes
snake_case : List[Any] = embed_dim
snake_case : int = depths
snake_case : List[Any] = num_heads
snake_case : Any = window_size
return config
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> str:
if "patch_embed.proj" in name:
snake_case : Any = name.replace("""patch_embed.proj""" ,"""embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
snake_case : Dict = name.replace("""patch_embed.norm""" ,"""embeddings.norm""" )
if "layers" in name:
snake_case : Dict = """encoder.""" + name
if "attn.proj" in name:
snake_case : Dict = name.replace("""attn.proj""" ,"""attention.output.dense""" )
if "attn" in name:
snake_case : List[Any] = name.replace("""attn""" ,"""attention.self""" )
if "norm1" in name:
snake_case : Optional[Any] = name.replace("""norm1""" ,"""layernorm_before""" )
if "norm2" in name:
snake_case : List[str] = name.replace("""norm2""" ,"""layernorm_after""" )
if "mlp.fc1" in name:
snake_case : int = name.replace("""mlp.fc1""" ,"""intermediate.dense""" )
if "mlp.fc2" in name:
snake_case : Dict = name.replace("""mlp.fc2""" ,"""output.dense""" )
if "q_bias" in name:
snake_case : str = name.replace("""q_bias""" ,"""query.bias""" )
if "k_bias" in name:
snake_case : Union[str, Any] = name.replace("""k_bias""" ,"""key.bias""" )
if "v_bias" in name:
snake_case : Tuple = name.replace("""v_bias""" ,"""value.bias""" )
if "cpb_mlp" in name:
snake_case : Tuple = name.replace("""cpb_mlp""" ,"""continuous_position_bias_mlp""" )
if name == "norm.weight":
snake_case : int = """layernorm.weight"""
if name == "norm.bias":
snake_case : Tuple = """layernorm.bias"""
if "head" in name:
snake_case : Any = name.replace("""head""" ,"""classifier""" )
else:
snake_case : List[Any] = """swinv2.""" + name
return name
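# Example of the renaming above, following the replacement rules step by step
# (derived from the code, not from a stored mapping):
#   "layers.0.blocks.0.attn.proj.weight"
#     -> "swinv2.encoder.layers.0.blocks.0.attention.output.dense.weight"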
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> Tuple:
for key in orig_state_dict.copy().keys():
snake_case : List[str] = orig_state_dict.pop(lowercase )
if "mask" in key:
continue
elif "qkv" in key:
snake_case : Dict = key.split(""".""" )
snake_case : List[Any] = int(key_split[1] )
snake_case : List[str] = int(key_split[3] )
snake_case : Optional[Any] = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
snake_case : List[Any] = val[:dim, :]
snake_case : Union[str, Any] = val[dim : dim * 2, :]
snake_case : Any = val[-dim:, :]
else:
snake_case : List[str] = val[:dim]
snake_case : Union[str, Any] = val[
dim : dim * 2
]
snake_case : List[str] = val[-dim:]
else:
snake_case : Optional[Any] = val
return orig_state_dict
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> Dict:
snake_case : Tuple = timm.create_model(lowercase ,pretrained=lowercase )
timm_model.eval()
snake_case : Tuple = get_swinva_config(lowercase )
snake_case : Optional[int] = SwinvaForImageClassification(lowercase )
model.eval()
snake_case : Optional[Any] = convert_state_dict(timm_model.state_dict() ,lowercase )
model.load_state_dict(lowercase )
snake_case : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case : str = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swinva_name.replace("""_""" ,"""-""" ) ) )
snake_case : int = Image.open(requests.get(lowercase ,stream=lowercase ).raw )
snake_case : str = image_processor(images=lowercase ,return_tensors="""pt""" )
snake_case : Dict = timm_model(inputs["""pixel_values"""] )
snake_case : int = model(**lowercase ).logits
assert torch.allclose(lowercase ,lowercase ,atol=1E-3 )
print(f"""Saving model {swinva_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowercase )
model.push_to_hub(
repo_path_or_name=Path(lowercase ,lowercase ) ,organization="""nandwalritik""" ,commit_message="""Add model""" ,)
if __name__ == "__main__":
lowerCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swinv2_name',
default='swinv2_tiny_patch4_window8_256',
type=str,
help='Name of the Swinv2 timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowerCamelCase : List[str] = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 684 |
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> list:
for i in range(len(lowercase ) - 1 ,0 ,-1 ):
snake_case : Any = False
for j in range(lowercase ,0 ,-1 ):
if unsorted[j] < unsorted[j - 1]:
snake_case , snake_case : Optional[Any] = unsorted[j - 1], unsorted[j]
snake_case : Dict = True
for j in range(lowercase ):
if unsorted[j] > unsorted[j + 1]:
snake_case , snake_case : Dict = unsorted[j + 1], unsorted[j]
snake_case : Tuple = True
if not swapped:
break
return unsorted
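# Example (illustrative input): cocktail_shaker_sort([4, 5, 2, 1, 2]) returns
# [1, 2, 2, 4, 5]; the list is sorted in place by alternating a backward pass
# (sinking small values) with a forward pass (floating large values).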
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase : Any = input('Enter numbers separated by a comma:\n').strip()
lowerCamelCase : Optional[int] = [int(item) for item in user_input.split(',')]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 684 | 1 |
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""input_ids""", """attention_mask"""]
def __init__( self , A="</s>" , A="<unk>" , A="<pad>" , A=1_2_5 , A=None , **A , ) -> None:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
snake_case : str = [f"""<extra_id_{i}>""" for i in range(A )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
snake_case : int = len(set(filter(lambda A : bool("""extra_id""" in str(A ) ) , A ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
""" provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"""
""" extra_ids tokens""" )
snake_case : str = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else pad_token
snake_case : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else eos_token
snake_case : int = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else unk_token
super().__init__(
eos_token=A , unk_token=A , pad_token=A , extra_ids=A , additional_special_tokens=A , **A , )
snake_case : Union[str, Any] = extra_ids
snake_case : Tuple = 2**8 # utf is 8 bits
# define special tokens dict
snake_case : Dict[str, int] = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
snake_case : str = len(self.special_tokens_encoder )
snake_case : Any = len(A )
for i, token in enumerate(A ):
snake_case : Optional[int] = self.vocab_size + i - n
snake_case : Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def UpperCAmelCase ( self ) -> Union[str, Any]:
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def UpperCAmelCase ( self , A , A = None , A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(A )) + [1]
return ([0] * len(A )) + [1] + ([0] * len(A )) + [1]
def UpperCAmelCase ( self , A ) -> List[int]:
if len(A ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"""
""" eos tokens being added.""" )
return token_ids
else:
return token_ids + [self.eos_token_id]
def UpperCAmelCase ( self , A , A = None ) -> List[int]:
snake_case : Any = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def UpperCAmelCase ( self , A , A = None ) -> List[int]:
snake_case : Dict = self._add_eos_if_not_present(A )
if token_ids_a is None:
return token_ids_a
else:
snake_case : Optional[int] = self._add_eos_if_not_present(A )
return token_ids_a + token_ids_a
def UpperCAmelCase ( self , A ) -> List[str]:
snake_case : Tuple = [chr(A ) for i in text.encode("""utf-8""" )]
return tokens
def UpperCAmelCase ( self , A ) -> Dict:
if token in self.special_tokens_encoder:
snake_case : str = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
snake_case : Union[str, Any] = self.added_tokens_encoder[token]
elif len(A ) != 1:
snake_case : List[str] = self.unk_token_id
else:
snake_case : Union[str, Any] = ord(A ) + self._num_special_tokens
return token_id
def UpperCAmelCase ( self , A ) -> Optional[Any]:
if index in self.special_tokens_decoder:
snake_case : str = self.special_tokens_decoder[index]
else:
snake_case : List[Any] = chr(index - self._num_special_tokens )
return token
def UpperCAmelCase ( self , A ) -> Tuple:
snake_case : Optional[Any] = B""""""
for token in tokens:
if token in self.special_tokens_decoder:
snake_case : int = self.special_tokens_decoder[token].encode("""utf-8""" )
elif token in self.added_tokens_decoder:
snake_case : List[str] = self.special_tokens_decoder[token].encode("""utf-8""" )
elif token in self.special_tokens_encoder:
snake_case : List[Any] = token.encode("""utf-8""" )
elif token in self.added_tokens_encoder:
snake_case : List[Any] = token.encode("""utf-8""" )
else:
snake_case : Tuple = bytes([ord(A )] )
bstring += tok_string
snake_case : Optional[Any] = bstring.decode("""utf-8""" , errors="""ignore""" )
return string
def UpperCAmelCase ( self , A , A = None ) -> Tuple[str]:
return ()
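# Byte-level id mapping sketch: with the three default special tokens
# (pad=0, eos=1, unk=2), an ordinary character c is encoded as ord(c) + 3,
# e.g. "h" -> 104 + 3 = 107; decoding subtracts the offset and reassembles the
# UTF-8 bytes, so non-ASCII text round-trips through multiple ids.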
| 684 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : Any = {
'artists_file': 'artists.json',
'lyrics_file': 'lyrics.json',
'genres_file': 'genres.json',
}
lowerCamelCase : Any = {
'artists_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
},
'genres_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
},
'lyrics_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
},
}
lowerCamelCase : Optional[int] = {
'jukebox': 5_1_2,
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_LYRIC_TOKENS_SIZES
_snake_case = ["""input_ids""", """attention_mask"""]
def __init__( self , A , A , A , A=["v3", "v2", "v2"] , A=5_1_2 , A=5 , A="<|endoftext|>" , **A , ) -> Optional[Any]:
snake_case : Dict = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else unk_token
super().__init__(
unk_token=A , n_genres=A , version=A , max_n_lyric_tokens=A , **A , )
snake_case : Optional[Any] = version
snake_case : Optional[Any] = max_n_lyric_tokens
snake_case : Tuple = n_genres
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : Union[str, Any] = json.load(A )
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : str = json.load(A )
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : List[str] = json.load(A )
snake_case : Tuple = r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"""
# In v2 the lyrics vocabulary had n_vocab=80 characters; v3 dropped "+", leaving n_vocab=79.
if len(self.lyrics_encoder ) == 7_9:
snake_case : Optional[Any] = oov.replace(r"""\-'""" , r"""\-+'""" )
snake_case : Optional[Any] = regex.compile(A )
snake_case : Optional[Any] = {v: k for k, v in self.artists_encoder.items()}
snake_case : int = {v: k for k, v in self.genres_encoder.items()}
snake_case : List[Any] = {v: k for k, v in self.lyrics_encoder.items()}
@property
def UpperCAmelCase ( self ) -> Optional[Any]:
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def UpperCAmelCase ( self ) -> str:
# dict() cannot take multiple positional mappings; merge the three vocabularies explicitly.
return {**self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder}
def UpperCAmelCase ( self , A , A , A ) -> Optional[Any]:
snake_case : Optional[int] = [self.artists_encoder.get(A , 0 ) for artist in list_artists]
for genres in range(len(A ) ):
snake_case : Optional[int] = [self.genres_encoder.get(A , 0 ) for genre in list_genres[genres]]
snake_case : Union[str, Any] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
snake_case : Optional[Any] = [[self.lyrics_encoder.get(A , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def UpperCAmelCase ( self , A ) -> List[str]:
return list(A )
def UpperCAmelCase ( self , A , A , A , **A ) -> List[str]:
snake_case , snake_case , snake_case : Any = self.prepare_for_tokenization(A , A , A )
snake_case : Tuple = self._tokenize(A )
return artist, genre, lyrics
def UpperCAmelCase ( self , A , A , A , A = False ) -> Tuple[str, str, str, Dict[str, Any]]:
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
snake_case : Tuple = artists[idx].lower()
snake_case : List[Any] = [genres[idx].lower()]
else:
snake_case : Union[str, Any] = self._normalize(artists[idx] ) + """.v2"""
snake_case : Any = [
self._normalize(A ) + """.v2""" for genre in genres[idx].split("""_""" )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
snake_case : str = regex.compile(r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""" )
snake_case : Dict = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"""
snake_case : Union[str, Any] = {vocab[index]: index + 1 for index in range(len(A ) )}
snake_case : Optional[int] = 0
snake_case : Union[str, Any] = len(A ) + 1
snake_case : Optional[int] = self.vocab
snake_case : str = {v: k for k, v in self.vocab.items()}
snake_case : int = """"""
else:
snake_case : Optional[int] = regex.compile(r"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""" )
snake_case : int = self._run_strip_accents(A )
snake_case : Any = lyrics.replace("""\\""" , """\n""" )
snake_case : Tuple = self.out_of_vocab.sub("""""" , A ), [], []
return artists, genres, lyrics
def UpperCAmelCase ( self , A ) -> List[Any]:
snake_case : int = unicodedata.normalize("""NFD""" , A )
snake_case : int = []
for char in text:
snake_case : Optional[Any] = unicodedata.category(A )
if cat == "Mn":
continue
output.append(A )
return "".join(A )
def UpperCAmelCase ( self , A ) -> str:
snake_case : Dict = (
[chr(A ) for i in range(ord("""a""" ) , ord("""z""" ) + 1 )]
+ [chr(A ) for i in range(ord("""A""" ) , ord("""Z""" ) + 1 )]
+ [chr(A ) for i in range(ord("""0""" ) , ord("""9""" ) + 1 )]
+ ["""."""]
)
snake_case : Dict = frozenset(A )
snake_case : Dict = re.compile(r"""_+""" )
snake_case : str = """""".join([c if c in accepted else """_""" for c in text.lower()] )
snake_case : List[Any] = pattern.sub("""_""" , A ).strip("""_""" )
return text
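# Example of the normalization above: only a-z, 0-9 and "." survive, everything
# else collapses to single underscores, so _normalize("Bob Dylan!!") -> "bob_dylan".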
def UpperCAmelCase ( self , A ) -> str:
return " ".join(A )
def UpperCAmelCase ( self , A , A = None , A = False ) -> List[Any]:
# Convert to TensorType
if not isinstance(A , A ):
snake_case : Tuple = TensorType(A )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
"""Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""" )
import tensorflow as tf
snake_case : Union[str, Any] = tf.constant
snake_case : int = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""" )
import torch
snake_case : List[str] = torch.tensor
snake_case : Optional[Any] = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""" )
import jax.numpy as jnp # noqa: F811
snake_case : Optional[int] = jnp.array
snake_case : Dict = _is_jax
else:
snake_case : List[str] = np.asarray
snake_case : Tuple = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
snake_case : Any = [inputs]
if not is_tensor(A ):
snake_case : List[Any] = as_tensor(A )
except: # noqa E722
raise ValueError(
"""Unable to create tensor, you should probably activate truncation and/or padding """
"""with 'padding=True' 'truncation=True' to have batched tensors with the same length.""" )
return inputs
def __call__( self , A , A , A="" , A="pt" ) -> BatchEncoding:
snake_case : List[str] = [0, 0, 0]
snake_case : List[str] = [artist] * len(self.version )
snake_case : List[Any] = [genres] * len(self.version )
snake_case , snake_case , snake_case : Optional[int] = self.tokenize(A , A , A )
snake_case , snake_case , snake_case : int = self._convert_token_to_id(A , A , A )
snake_case : Any = [-INFINITY] * len(full_tokens[-1] )
snake_case : int = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=A )
for i in range(len(self.version ) )
]
return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks} )
def UpperCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Any = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=A ) )
snake_case : Any = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=A ) )
snake_case : Tuple = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=A ) )
return (artists_file, genres_file, lyrics_file)
def UpperCAmelCase ( self , A , A , A ) -> List[Any]:
snake_case : Optional[int] = self.artists_decoder.get(A )
snake_case : Optional[Any] = [self.genres_decoder.get(A ) for genre in genres_index]
snake_case : Optional[int] = [self.lyrics_decoder.get(A ) for character in lyric_index]
return artist, genres, lyrics
| 684 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase : int = logging.get_logger(__name__)
lowerCamelCase : List[Any] = {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """convbert"""
def __init__( self , A=3_0_5_2_2 , A=7_6_8 , A=1_2 , A=1_2 , A=3_0_7_2 , A="gelu" , A=0.1 , A=0.1 , A=5_1_2 , A=2 , A=0.02 , A=1e-1_2 , A=1 , A=0 , A=2 , A=7_6_8 , A=2 , A=9 , A=1 , A=None , **A , ) -> List[str]:
super().__init__(
pad_token_id=A , bos_token_id=A , eos_token_id=A , **A , )
snake_case : Tuple = vocab_size
snake_case : Tuple = hidden_size
snake_case : Tuple = num_hidden_layers
snake_case : int = num_attention_heads
snake_case : List[Any] = intermediate_size
snake_case : List[Any] = hidden_act
snake_case : Any = hidden_dropout_prob
snake_case : Any = attention_probs_dropout_prob
snake_case : int = max_position_embeddings
snake_case : Dict = type_vocab_size
snake_case : int = initializer_range
snake_case : List[str] = layer_norm_eps
snake_case : Dict = embedding_size
snake_case : Any = head_ratio
snake_case : Optional[int] = conv_kernel_size
snake_case : Tuple = num_groups
snake_case : Optional[Any] = classifier_dropout
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
@property
def UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
snake_case : List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
snake_case : List[Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 684 |
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> list:
snake_case : str = len(lowercase )
snake_case : Tuple = []
for i in range(len(lowercase ) - pat_len + 1 ):
snake_case : str = True
for j in range(lowercase ):
if s[i + j] != pattern[j]:
snake_case : Dict = False
break
if match_found:
position.append(lowercase )
return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
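# The call above prints [4, 10, 18]: "ABC" starts at those zero-based offsets in
# "ABAAABCDBBABCDDEBCABC". The scan is O(len(s) * len(pattern)) in the worst case.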
| 684 | 1 |
lowerCamelCase : List[Any] = 2_5_6
# Modulus to hash a string
lowerCamelCase : Tuple = 1_0_0_0_0_0_3
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> bool:
snake_case : Optional[int] = len(lowercase )
snake_case : Optional[Any] = len(lowercase )
if p_len > t_len:
return False
snake_case : List[str] = 0
snake_case : Optional[int] = 0
snake_case : List[str] = 1
# Calculating the hash of pattern and substring of text
for i in range(lowercase ):
snake_case : Any = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
snake_case : str = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
snake_case : Optional[int] = (modulus_power * alphabet_size) % modulus
for i in range(0 ,t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
snake_case : Any = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
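# The rolling-hash update above implements, in plain arithmetic:
#   new_hash = ((old_hash - ord(dropped) * alphabet_size**(p_len - 1))
#               * alphabet_size + ord(added)) % modulus
# where modulus_power caches alphabet_size**(p_len - 1) % modulus, so each
# window shift costs O(1) instead of rehashing the whole substring.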
def SCREAMING_SNAKE_CASE__ ( ) -> None:
snake_case : int = """abc1abc12"""
snake_case : Tuple = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
snake_case : Dict = """alskfjaldsk23adsfabcabc"""
assert rabin_karp(lowercase ,lowercase ) and not rabin_karp(lowercase ,lowercase )
# Test 2)
snake_case : int = """ABABX"""
snake_case : Tuple = """ABABZABABYABABX"""
assert rabin_karp(lowercase ,lowercase )
# Test 3)
snake_case : Optional[int] = """AAAB"""
snake_case : Union[str, Any] = """ABAAAAAB"""
assert rabin_karp(lowercase ,lowercase )
# Test 4)
snake_case : Union[str, Any] = """abcdabcy"""
snake_case : Optional[int] = """abcxabcdabxabcdabcdabcy"""
assert rabin_karp(lowercase ,lowercase )
# Test 5)
snake_case : Tuple = """Lü"""
snake_case : Tuple = """Lüsai"""
assert rabin_karp(lowercase ,lowercase )
snake_case : Optional[Any] = """Lue"""
assert not rabin_karp(lowercase ,lowercase )
print("""Success.""" )
if __name__ == "__main__":
test_rabin_karp()
| 684 |
import numpy as np
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> np.array:
return (2 / (1 + np.exp(-2 * vector ))) - 1
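# The expression above is an algebraic rewrite of tanh: 2 / (1 + e^(-2x)) - 1
# == tanh(x), applied elementwise, e.g. it maps 0.0 -> 0.0 and 1.0 -> ~0.76159.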
if __name__ == "__main__":
import doctest
doctest.testmod()
| 684 | 1 |
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Union[str, Any]:
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(lowercase ):
return ext
raise Exception(
f"""Unable to determine file format from file extension {path}. """
f"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
snake_case : Dict = pipeline(
task=args.task ,model=args.model if args.model else None ,config=args.config ,tokenizer=args.tokenizer ,device=args.device ,)
snake_case : List[str] = try_infer_format_from_ext(args.input ) if args.format == """infer""" else args.format
snake_case : List[str] = PipelineDataFormat.from_str(
format=lowercase ,output_path=args.output ,input_path=args.input ,column=args.column if args.column else nlp.default_input_names ,overwrite=args.overwrite ,)
return RunCommand(lowercase ,lowercase )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
def __init__( self , A , A ) -> int:
snake_case : List[str] = nlp
snake_case : Union[str, Any] = reader
@staticmethod
def UpperCAmelCase ( A ) -> Union[str, Any]:
snake_case : str = parser.add_parser("""run""" , help="""Run a pipeline through the CLI""" )
run_parser.add_argument("""--task""" , choices=get_supported_tasks() , help="""Task to run""" )
run_parser.add_argument("""--input""" , type=A , help="""Path to the file to use for inference""" )
run_parser.add_argument("""--output""" , type=A , help="""Path to the file that will be used post to write results.""" )
run_parser.add_argument("""--model""" , type=A , help="""Name or path to the model to instantiate.""" )
run_parser.add_argument("""--config""" , type=A , help="""Name or path to the model's config to instantiate.""" )
run_parser.add_argument(
"""--tokenizer""" , type=A , help="""Name of the tokenizer to use. (default: same as the model name)""" )
run_parser.add_argument(
"""--column""" , type=A , help="""Name of the column to use as input. (For multi columns input as QA use column1,columns2)""" , )
run_parser.add_argument(
"""--format""" , type=A , default="""infer""" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="""Input format to read from""" , )
run_parser.add_argument(
"""--device""" , type=A , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , )
run_parser.add_argument("""--overwrite""" , action="""store_true""" , help="""Allow overwriting the output file.""" )
run_parser.set_defaults(func=A )
def UpperCAmelCase ( self ) -> Tuple:
snake_case , snake_case : int = self._nlp, []
for entry in self._reader:
snake_case : Union[str, Any] = nlp(**A ) if self._reader.is_multi_columns else nlp(A )
if isinstance(A , A ):
outputs.append(A )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
snake_case : List[str] = self._reader.save_binary(A )
logger.warning(f"""Current pipeline requires output to be in binary format, saving at {binary_path}""" )
else:
self._reader.save(A )
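# Hypothetical invocation of the subcommand registered above (flag names are the
# ones added by the static registration method; transformers-cli is the entry point):
#   transformers-cli run --task text-classification --input data.csv --output preds.csv
# The input format is inferred from the file extension unless --format is passed.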
| 684 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase : Tuple = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : int = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Dict = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 684 | 1 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowercase :
"""simple docstring"""
def __init__( self , A , A=1_3 , A=7 , A=True , A=True , A=False , A=True , A=9_9 , A=3_2 , A=5 , A=4 , A=3_7 , A="gelu" , A=0.1 , A=0.1 , A=5_1_2 , A=1_6 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> Optional[int]:
snake_case : Tuple = parent
snake_case : str = batch_size
snake_case : Optional[Any] = seq_length
snake_case : Union[str, Any] = is_training
snake_case : Optional[Any] = use_input_mask
snake_case : int = use_token_type_ids
snake_case : List[str] = use_labels
snake_case : Optional[int] = vocab_size
snake_case : List[str] = hidden_size
snake_case : Optional[int] = num_hidden_layers
snake_case : int = num_attention_heads
snake_case : Optional[int] = intermediate_size
snake_case : str = hidden_act
snake_case : List[str] = hidden_dropout_prob
snake_case : Optional[Any] = attention_probs_dropout_prob
snake_case : Optional[Any] = max_position_embeddings
snake_case : Optional[int] = type_vocab_size
snake_case : str = type_sequence_label_size
snake_case : Tuple = initializer_range
snake_case : Optional[int] = num_labels
snake_case : Union[str, Any] = num_choices
snake_case : List[Any] = scope
def UpperCAmelCase ( self ) -> List[str]:
snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case : List[str] = None
if self.use_input_mask:
snake_case : Any = random_attention_mask([self.batch_size, self.seq_length] )
snake_case : List[Any] = None
if self.use_token_type_ids:
snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case : Union[str, Any] = None
snake_case : Dict = None
snake_case : str = None
if self.use_labels:
snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
snake_case : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self ) -> Any:
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , )
def UpperCAmelCase ( self , A , A , A , A , A , A , A ) -> Tuple:
snake_case : Tuple = BioGptModel(config=A )
model.to(A )
model.eval()
snake_case : Optional[int] = model(A , attention_mask=A )
snake_case : Optional[Any] = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , A , A , A , A , A , A , A , A , A , ) -> Optional[int]:
snake_case : Optional[int] = BioGptForCausalLM(config=A )
model.to(A )
model.eval()
snake_case : Union[str, Any] = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self , A , A , A , A , A , *A ) -> int:
snake_case : Tuple = BioGptModel(config=A )
model.to(A )
model.eval()
# create attention mask
snake_case : List[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=A )
snake_case : Tuple = self.seq_length // 2
snake_case : Dict = 0
# first forward pass
snake_case , snake_case : List[Any] = model(A , attention_mask=A ).to_tuple()
# create hypothetical next token and extent to next_input_ids
snake_case : Optional[int] = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
snake_case : str = ids_tensor((1,) , A ).item() + 1
snake_case : Union[str, Any] = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
snake_case : Optional[int] = random_other_next_tokens
# append to next input_ids and attn_mask
snake_case : List[str] = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case : Any = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=A )] , dim=1 , )
# get two different outputs
snake_case : int = model(A , attention_mask=A )["""last_hidden_state"""]
snake_case : Optional[int] = model(A , past_key_values=A , attention_mask=A )["""last_hidden_state"""]
# select random slice
snake_case : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case : List[str] = output_from_no_past[:, -1, random_slice_idx].detach()
snake_case : Tuple = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1e-3 ) )
def UpperCAmelCase ( self , A , A , A , A , A , *A ) -> Optional[Any]:
snake_case : str = BioGptModel(config=A ).to(A ).eval()
snake_case : Optional[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=A )
# first forward pass
snake_case : Union[str, Any] = model(A , attention_mask=A , use_cache=A )
snake_case , snake_case : List[str] = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
snake_case : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case : Optional[int] = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
snake_case : List[str] = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case : Any = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
snake_case : Any = model(A , attention_mask=A )["""last_hidden_state"""]
snake_case : Tuple = model(A , attention_mask=A , past_key_values=A )[
"""last_hidden_state"""
]
# select random slice
snake_case : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case : Optional[int] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1e-3 ) )
def UpperCAmelCase ( self , A , A , A , A , A , *A , A=False ) -> str:
snake_case : Tuple = BioGptForCausalLM(A )
model.to(A )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
snake_case : Union[str, Any] = model(A , labels=A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def UpperCAmelCase ( self , A , *A ) -> Any:
snake_case : Any = BioGptModel(A )
snake_case : Optional[int] = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_01 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def UpperCAmelCase ( self , A , A , A , A , A , *A ) -> str:
snake_case : List[str] = self.num_labels
snake_case : int = BioGptForTokenClassification(A )
model.to(A )
model.eval()
snake_case : Any = model(A , attention_mask=A , token_type_ids=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self ) -> int:
snake_case : List[str] = self.prepare_config_and_inputs()
snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case : int = config_and_inputs
snake_case : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __lowercase (UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
_snake_case = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
_snake_case = (BioGptForCausalLM,) if is_torch_available() else ()
_snake_case = (
{
"""feature-extraction""": BioGptModel,
"""text-classification""": BioGptForSequenceClassification,
"""text-generation""": BioGptForCausalLM,
"""token-classification""": BioGptForTokenClassification,
"""zero-shot""": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
_snake_case = False
def UpperCAmelCase ( self ) -> List[str]:
snake_case : Optional[int] = BioGptModelTester(self )
snake_case : Dict = ConfigTester(self , config_class=A , hidden_size=3_7 )
def UpperCAmelCase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ) -> Optional[Any]:
snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCAmelCase ( self ) -> int:
snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case : str = type
self.model_tester.create_and_check_model(*A )
def UpperCAmelCase ( self ) -> str:
snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*A )
def UpperCAmelCase ( self ) -> List[Any]:
snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*A , gradient_checkpointing=A )
def UpperCAmelCase ( self ) -> Union[str, Any]:
snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*A )
def UpperCAmelCase ( self ) -> Any:
snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*A )
def UpperCAmelCase ( self ) -> Optional[Any]:
snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*A )
@slow
def UpperCAmelCase ( self ) -> Any:
snake_case : Any = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(A )
snake_case : List[str] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
snake_case : Dict = """left"""
# Define PAD Token = EOS Token
snake_case : Union[str, Any] = tokenizer.eos_token
snake_case : Any = model.config.eos_token_id
# use different length sentences to test batching
snake_case : List[str] = [
"""Hello, my dog is a little""",
"""Today, I""",
]
snake_case : Tuple = tokenizer(A , return_tensors="""pt""" , padding=A )
snake_case : Any = inputs["""input_ids"""].to(A )
snake_case : int = model.generate(
input_ids=A , attention_mask=inputs["""attention_mask"""].to(A ) , )
snake_case : Any = tokenizer(sentences[0] , return_tensors="""pt""" ).input_ids.to(A )
snake_case : List[str] = model.generate(input_ids=A )
snake_case : List[str] = inputs_non_padded.shape[-1] - inputs["""attention_mask"""][-1].long().sum().cpu().item()
snake_case : Any = tokenizer(sentences[1] , return_tensors="""pt""" ).input_ids.to(A )
snake_case : Union[str, Any] = model.generate(input_ids=A , max_length=model.config.max_length - num_paddings )
snake_case : Dict = tokenizer.batch_decode(A , skip_special_tokens=A )
snake_case : int = tokenizer.decode(output_non_padded[0] , skip_special_tokens=A )
snake_case : str = tokenizer.decode(output_padded[0] , skip_special_tokens=A )
snake_case : Optional[int] = [
"""Hello, my dog is a little bit bigger than a little bit.""",
"""Today, I have a good idea of how to use the information""",
]
self.assertListEqual(A , A )
self.assertListEqual(A , [non_padded_sentence, padded_sentence] )
@slow
def UpperCAmelCase ( self ) -> List[str]:
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case : Union[str, Any] = BioGptModel.from_pretrained(A )
self.assertIsNotNone(A )
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case , snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case : List[str] = 3
snake_case : Any = input_dict["""input_ids"""]
snake_case : str = input_ids.ne(1 ).to(A )
snake_case : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case : Optional[Any] = BioGptForSequenceClassification(A )
model.to(A )
model.eval()
snake_case : Dict = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase ( self ) -> List[str]:
snake_case , snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
snake_case : Tuple = 3
snake_case : Any = """multi_label_classification"""
snake_case : Optional[int] = input_dict["""input_ids"""]
snake_case : Any = input_ids.ne(1 ).to(A )
snake_case : Optional[Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
snake_case : Any = BioGptForSequenceClassification(A )
model.to(A )
model.eval()
snake_case : str = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase ( self ) -> List[Any]:
snake_case : List[Any] = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
snake_case : List[str] = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]] )
snake_case : str = model(A )[0]
snake_case : List[str] = 4_2_3_8_4
snake_case : List[str] = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , A )
snake_case : Dict = torch.tensor(
[[[-9.52_36, -9.89_18, 10.45_57], [-11.04_69, -9.64_23, 8.10_22], [-8.86_64, -7.88_26, 5.53_25]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , A , atol=1e-4 ) )
@slow
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Any = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
snake_case : List[Any] = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(A )
torch.manual_seed(0 )
snake_case : Dict = tokenizer("""COVID-19 is""" , return_tensors="""pt""" ).to(A )
snake_case : str = model.generate(
**A , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=A , )
snake_case : Dict = tokenizer.decode(output_ids[0] , skip_special_tokens=A )
snake_case : List[Any] = (
"""COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"""
""" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"""
""" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"""
""" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"""
""" more than 800,000 deaths."""
)
self.assertEqual(A , A )
| 684 |
lowerCamelCase : Union[str, Any] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
lowerCamelCase : Tuple = [{'type': 'code', 'content': INSTALL_CONTENT}]
lowerCamelCase : Union[str, Any] = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 684 | 1 |
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
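"""Return the number of set bits (1s) in the binary representation of a non-negative integer, using Brian Kernighan's trick of repeatedly clearing the lowest set bit; raises ValueError for negative or non-integer input."""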
if not isinstance(lowercase ,lowercase ) or number < 0:
raise ValueError("""Input must be a non-negative integer""" )
snake_case : List[str] = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 684 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {'vocab_file': 'spm_char.model'}
lowerCamelCase : List[str] = {
'vocab_file': {
'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model',
'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model',
'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model',
}
}
lowerCamelCase : List[Any] = {
'microsoft/speecht5_asr': 1_0_2_4,
'microsoft/speecht5_tts': 1_0_2_4,
'microsoft/speecht5_vc': 1_0_2_4,
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = ["""input_ids""", """attention_mask"""]
def __init__( self , A , A="<s>" , A="</s>" , A="<unk>" , A="<pad>" , A = None , **A , ) -> None:
snake_case : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A , eos_token=A , unk_token=A , pad_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
snake_case : Tuple = vocab_file
snake_case : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A )
@property
def UpperCAmelCase ( self ) -> List[Any]:
return self.sp_model.get_piece_size()
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Any = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> List[str]:
snake_case : Optional[Any] = self.__dict__.copy()
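# the SentencePieceProcessor itself is not picklable, so drop it before pickling; __setstate__ re-creates it from the vocab file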
snake_case : Optional[Any] = None
return state
def __setstate__( self , A ) -> Tuple:
snake_case : Any = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
snake_case : List[Any] = {}
snake_case : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase ( self , A ) -> List[str]:
return self.sp_model.encode(A , out_type=A )
def UpperCAmelCase ( self , A ) -> Tuple:
return self.sp_model.piece_to_id(A )
def UpperCAmelCase ( self , A ) -> int:
snake_case : Union[str, Any] = self.sp_model.IdToPiece(A )
return token
def UpperCAmelCase ( self , A ) -> Tuple:
snake_case : Optional[int] = []
snake_case : str = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A ) + token
snake_case : Dict = []
else:
current_sub_tokens.append(A )
out_string += self.sp_model.decode(A )
return out_string.strip()
def UpperCAmelCase ( self , A , A=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCAmelCase ( self , A , A = None , A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
snake_case : Any = [1]
if token_ids_a is None:
return ([0] * len(A )) + suffix_ones
return ([0] * len(A )) + ([0] * len(A )) + suffix_ones
def UpperCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Optional[Any] = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , """wb""" ) as fi:
snake_case : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
| 684 | 1 |
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
lowerCamelCase : int = True
except ImportError:
lowerCamelCase : str = False
lowerCamelCase : int = logging.get_logger(__name__) # pylint: disable=invalid-name
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> str:
return AddNewModelCommand(args.testing ,args.testing_file ,path=args.path )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
@staticmethod
def UpperCAmelCase ( A ) -> Union[str, Any]:
snake_case : Optional[int] = parser.add_parser("""add-new-model""" )
add_new_model_parser.add_argument("""--testing""" , action="""store_true""" , help="""If in testing mode.""" )
add_new_model_parser.add_argument("""--testing_file""" , type=A , help="""Configuration file on which to run.""" )
add_new_model_parser.add_argument(
"""--path""" , type=A , help="""Path to cookiecutter. Should only be used for testing purposes.""" )
add_new_model_parser.set_defaults(func=A )
def __init__( self , A , A , A=None , *A ) -> List[Any]:
snake_case : int = testing
snake_case : int = testing_file
snake_case : List[Any] = path
def UpperCAmelCase ( self ) -> Optional[int]:
warnings.warn(
"""The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. """
"""It is not actively maintained anymore, so might give a result that won't pass all tests and quality """
"""checks, you should use `transformers-cli add-new-model-like` instead.""" )
if not _has_cookiecutter:
raise ImportError(
"""Model creation dependencies are required to use the `add_new_model` command. Install them by running """
"""the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n""" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
snake_case : List[Any] = [directory for directory in os.listdir() if """cookiecutter-template-""" == directory[:2_2]]
if len(A ) > 0:
raise ValueError(
"""Several directories starting with `cookiecutter-template-` in current working directory. """
"""Please clean your directory by removing all folders starting with `cookiecutter-template-` or """
"""change your working directory.""" )
snake_case : int = (
Path(A ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
snake_case : Union[str, Any] = path_to_transformer_root / """templates""" / """adding_a_new_model"""
# Execute cookiecutter
if not self._testing:
cookiecutter(str(A ) )
else:
with open(self._testing_file , """r""" ) as configuration_file:
snake_case : Dict = json.load(A )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=A , extra_context=A , )
snake_case : Union[str, Any] = [directory for directory in os.listdir() if """cookiecutter-template-""" in directory[:2_2]][0]
# Retrieve configuration
with open(directory + """/configuration.json""" , """r""" ) as configuration_file:
snake_case : str = json.load(A )
snake_case : Dict = configuration["""lowercase_modelname"""]
snake_case : Dict = configuration["""generate_tensorflow_pytorch_and_flax"""]
os.remove(f"""{directory}/configuration.json""" )
snake_case : Dict = """PyTorch""" in generate_tensorflow_pytorch_and_flax
snake_case : Optional[Any] = """TensorFlow""" in generate_tensorflow_pytorch_and_flax
snake_case : Any = """Flax""" in generate_tensorflow_pytorch_and_flax
snake_case : Optional[int] = f"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"""
os.makedirs(A , exist_ok=A )
os.makedirs(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""" , exist_ok=A )
# Tests require submodules as they have parent imports
with open(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""" , """w""" ):
pass
shutil.move(
f"""{directory}/__init__.py""" , f"""{model_dir}/__init__.py""" , )
shutil.move(
f"""{directory}/configuration_{lowercase_model_name}.py""" , f"""{model_dir}/configuration_{lowercase_model_name}.py""" , )
def remove_copy_lines(A ):
with open(A , """r""" ) as f:
snake_case : List[str] = f.readlines()
with open(A , """w""" ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(A )
if output_pytorch:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_{lowercase_model_name}.py""" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_tf_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_tf_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" )
if output_flax:
if not self._testing:
remove_copy_lines(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/modeling_flax_{lowercase_model_name}.py""" , f"""{model_dir}/modeling_flax_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" , f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""" , )
else:
os.remove(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
os.remove(f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
f"""{directory}/{lowercase_model_name}.md""" , f"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""" , )
shutil.move(
f"""{directory}/tokenization_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}.py""" , )
shutil.move(
f"""{directory}/tokenization_fast_{lowercase_model_name}.py""" , f"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(A , A , A ):
# Create temp file
snake_case , snake_case : List[str] = mkstemp()
snake_case : List[Any] = False
with fdopen(A , """w""" ) as new_file:
with open(A ) as old_file:
for line in old_file:
new_file.write(A )
if line_to_copy_below in line:
snake_case : Any = True
for line_to_copy in lines_to_copy:
new_file.write(A )
if not line_found:
raise ValueError(f"""Line {line_to_copy_below} was not found in file.""" )
# Copy the file permissions from the old file to the new file
copymode(A , A )
# Remove original file
remove(A )
# Move new file
move(A , A )
def skip_units(A ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(A ):
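# template files mark edits with "# To replace in: <file>", "# Below: <anchor line>" and "# End." sentinels; the lines collected between them are spliced into the target file just below the anchor line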
with open(A ) as datafile:
snake_case : str = []
snake_case : Dict = False
snake_case : List[Any] = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
snake_case : Dict = line.split("""\"""" )[1]
snake_case : List[Any] = skip_units(A )
elif "# Below: " in line and "##" not in line:
snake_case : Tuple = line.split("""\"""" )[1]
snake_case : Union[str, Any] = skip_units(A )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(A , A , A )
snake_case : List[str] = []
elif "# Replace with" in line and "##" not in line:
snake_case : Dict = []
elif "##" not in line:
lines_to_copy.append(A )
remove(A )
replace_in_files(f"""{directory}/to_replace_{lowercase_model_name}.py""" )
os.rmdir(A )
| 684 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """gpt_neox_japanese"""
def __init__( self , A=3_2_0_0_0 , A=2_5_6_0 , A=3_2 , A=3_2 , A=4 , A="gelu" , A=1.00 , A=1_0_0_0_0 , A=2_0_4_8 , A=0.02 , A=1e-5 , A=True , A=3_1_9_9_6 , A=3_1_9_9_9 , A=0.1 , A=0.0 , **A , ) -> str:
super().__init__(bos_token_id=A , eos_token_id=A , **A )
snake_case : Optional[Any] = vocab_size
snake_case : Optional[Any] = max_position_embeddings
snake_case : Union[str, Any] = hidden_size
snake_case : Union[str, Any] = num_hidden_layers
snake_case : Optional[int] = num_attention_heads
snake_case : Optional[int] = intermediate_multiple_size
snake_case : int = hidden_act
snake_case : str = rotary_pct
snake_case : Optional[Any] = rotary_emb_base
snake_case : Any = initializer_range
snake_case : Any = layer_norm_eps
snake_case : Optional[Any] = use_cache
snake_case : Tuple = attention_dropout
snake_case : Tuple = hidden_dropout
| 684 | 1 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowercase :
"""simple docstring"""
def __init__( self , A , A=1_3 , A=3_2 , A=2 , A=3 , A=1_6 , A=[1, 2, 1] , A=[2, 2, 4] , A=2 , A=2.0 , A=True , A=0.0 , A=0.0 , A=0.1 , A="gelu" , A=False , A=True , A=0.02 , A=1e-5 , A=True , A=None , A=True , A=1_0 , A=8 , ) -> List[str]:
snake_case : Union[str, Any] = parent
snake_case : List[Any] = batch_size
snake_case : List[Any] = image_size
snake_case : Optional[Any] = patch_size
snake_case : Tuple = num_channels
snake_case : Dict = embed_dim
snake_case : Tuple = depths
snake_case : Tuple = num_heads
snake_case : Optional[Any] = window_size
snake_case : Any = mlp_ratio
snake_case : int = qkv_bias
snake_case : Tuple = hidden_dropout_prob
snake_case : int = attention_probs_dropout_prob
snake_case : Optional[Any] = drop_path_rate
snake_case : str = hidden_act
snake_case : List[Any] = use_absolute_embeddings
snake_case : List[Any] = patch_norm
snake_case : Any = layer_norm_eps
snake_case : int = initializer_range
snake_case : Union[str, Any] = is_training
snake_case : Optional[Any] = scope
snake_case : str = use_labels
snake_case : str = type_sequence_label_size
snake_case : str = encoder_stride
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case : Optional[Any] = None
if self.use_labels:
snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case : List[Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self ) -> Tuple:
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def UpperCAmelCase ( self , A , A , A ) -> int:
snake_case : List[str] = SwinvaModel(config=A )
model.to(A )
model.eval()
snake_case : List[str] = model(A )
snake_case : List[str] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
snake_case : Union[str, Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def UpperCAmelCase ( self , A , A , A ) -> List[str]:
snake_case : str = SwinvaForMaskedImageModeling(config=A )
model.to(A )
model.eval()
snake_case : Dict = model(A )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
snake_case : Union[str, Any] = 1
snake_case : Optional[int] = SwinvaForMaskedImageModeling(A )
model.to(A )
model.eval()
snake_case : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case : List[str] = model(A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCAmelCase ( self , A , A , A ) -> int:
snake_case : Optional[int] = self.type_sequence_label_size
snake_case : Any = SwinvaForImageClassification(A )
model.to(A )
model.eval()
snake_case : int = model(A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self ) -> Union[str, Any]:
snake_case : List[str] = self.prepare_config_and_inputs()
snake_case , snake_case , snake_case : Dict = config_and_inputs
snake_case : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __lowercase (UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
_snake_case = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
_snake_case = (
{"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
if is_torch_available()
else {}
)
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
def UpperCAmelCase ( self ) -> Union[str, Any]:
snake_case : Tuple = SwinvaModelTester(self )
snake_case : str = ConfigTester(self , config_class=A , embed_dim=3_7 )
def UpperCAmelCase ( self ) -> List[str]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self ) -> Tuple:
snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
@unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" )
def UpperCAmelCase ( self ) -> Dict:
pass
@unittest.skip(reason="""Swinv2 does not use inputs_embeds""" )
def UpperCAmelCase ( self ) -> Dict:
pass
def UpperCAmelCase ( self ) -> Union[str, Any]:
snake_case , snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : Optional[int] = model_class(A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A , nn.Linear ) )
def UpperCAmelCase ( self ) -> Any:
snake_case , snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : str = model_class(A )
snake_case : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case : Optional[int] = [*signature.parameters.keys()]
snake_case : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A )
def UpperCAmelCase ( self ) -> str:
snake_case , snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
snake_case : Optional[Any] = True
for model_class in self.all_model_classes:
snake_case : List[Any] = True
snake_case : List[Any] = False
snake_case : Union[str, Any] = True
snake_case : List[Any] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
snake_case : List[str] = model(**self._prepare_for_class(A , A ) )
snake_case : str = outputs.attentions
snake_case : Dict = len(self.model_tester.depths )
self.assertEqual(len(A ) , A )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case : List[str] = True
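# attention in Swinv2 is computed within local windows, so each attention map has shape (num_heads, window_size**2, window_size**2)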
snake_case : List[Any] = config.window_size**2
snake_case : Optional[Any] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
snake_case : Any = model(**self._prepare_for_class(A , A ) )
snake_case : Any = outputs.attentions
self.assertEqual(len(A ) , A )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
snake_case : Tuple = len(A )
# Check attention is always last and order is fine
snake_case : str = True
snake_case : Optional[Any] = True
snake_case : Optional[int] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
snake_case : Dict = model(**self._prepare_for_class(A , A ) )
if hasattr(self.model_tester , """num_hidden_states_types""" ):
snake_case : int = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
snake_case : Optional[int] = 2
self.assertEqual(out_len + added_hidden_states , len(A ) )
snake_case : List[Any] = outputs.attentions
self.assertEqual(len(A ) , A )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def UpperCAmelCase ( self , A , A , A , A ) -> Union[str, Any]:
snake_case : str = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
snake_case : List[str] = model(**self._prepare_for_class(A , A ) )
snake_case : List[Any] = outputs.hidden_states
snake_case : Union[str, Any] = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(A ) , A )
# Swinv2 has a different seq_length
snake_case : Union[str, Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case : Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
snake_case : Dict = outputs.reshaped_hidden_states
self.assertEqual(len(A ) , A )
snake_case , snake_case , snake_case , snake_case : List[str] = reshaped_hidden_states[0].shape
snake_case : Any = (
reshaped_hidden_states[0].view(A , A , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def UpperCAmelCase ( self ) -> List[Any]:
snake_case , snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case : List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
snake_case : int = True
self.check_hidden_states_output(A , A , A , A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case : Tuple = True
self.check_hidden_states_output(A , A , A , A )
def UpperCAmelCase ( self ) -> Optional[Any]:
snake_case , snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case : Any = 3
snake_case : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
snake_case : Dict = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case : List[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
snake_case : Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
snake_case : List[str] = True
self.check_hidden_states_output(A , A , A , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case : Any = True
self.check_hidden_states_output(A , A , A , (padded_height, padded_width) )
def UpperCAmelCase ( self ) -> Any:
snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*A )
def UpperCAmelCase ( self ) -> str:
snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
@slow
def UpperCAmelCase ( self ) -> Optional[int]:
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case : Tuple = SwinvaModel.from_pretrained(A )
self.assertIsNotNone(A )
def UpperCAmelCase ( self ) -> List[Any]:
snake_case , snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case : str = _config_zero_init(A )
for model_class in self.all_model_classes:
snake_case : Union[str, Any] = model_class(config=A )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase ( self ) -> str:
return (
AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self ) -> Any:
snake_case : Optional[int] = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to(
A )
snake_case : str = self.default_image_processor
snake_case : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
snake_case : List[str] = image_processor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
snake_case : Tuple = model(**A )
# verify the logits
snake_case : Any = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , A )
snake_case : Optional[int] = torch.tensor([-0.39_47, -0.43_06, 0.00_26] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1e-4 ) )
| 684 |
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
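"""Convert a hexadecimal string (optionally prefixed with '-') into an integer whose decimal digits spell out the binary representation, e.g. "AC" -> 10101100."""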
snake_case : Optional[Any] = hex_num.strip()
if not hex_num:
raise ValueError("""No value was passed to the function""" )
snake_case : Any = hex_num[0] == """-"""
if is_negative:
snake_case : int = hex_num[1:]
try:
snake_case : List[Any] = int(lowercase ,16 )
except ValueError:
raise ValueError("""Invalid value was passed to the function""" )
snake_case : Dict = """"""
while int_num > 0:
snake_case : Dict = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(("""-""" + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 684 | 1 |
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def SCREAMING_SNAKE_CASE__ ( lowercase = "" ) -> dict[str, float]:
snake_case : Any = url or """https://www.imdb.com/chart/top/?ref_=nv_mv_250"""
snake_case : Any = BeautifulSoup(requests.get(lowercase ).text ,"""html.parser""" )
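# each chart row keeps the movie title in a "titleColumn" cell and its score in a "ratingColumn imdbRating" cell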
snake_case : Optional[Any] = soup.find_all("""td""" ,attrs="""titleColumn""" )
snake_case : List[str] = soup.find_all("""td""" ,class_="""ratingColumn imdbRating""" )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(lowercase ,lowercase )
}
def SCREAMING_SNAKE_CASE__ ( lowercase = "IMDb_Top_250_Movies.csv" ) -> None:
snake_case : Dict = get_imdb_top_aaa_movies()
with open(lowercase ,"""w""" ,newline="""""" ) as out_file:
snake_case : Union[str, Any] = csv.writer(lowercase )
writer.writerow(["""Movie title""", """IMDb rating"""] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 684 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""pixel_values"""]
def __init__( self , A = True , A = None , A = PIL.Image.BICUBIC , A = True , A = None , A = 1 / 2_5_5 , A = True , A = True , A = None , A = None , **A , ) -> None:
super().__init__(**A )
snake_case : int = size if size is not None else {"""height""": 2_5_6, """width""": 2_5_6}
snake_case : int = get_size_dict(A )
snake_case : Optional[Any] = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
snake_case : Dict = get_size_dict(A , param_name="""crop_size""" )
snake_case : int = do_resize
snake_case : str = size
snake_case : Tuple = resample
snake_case : Any = do_center_crop
snake_case : Tuple = crop_size
snake_case : int = do_rescale
snake_case : Dict = rescale_factor
snake_case : Union[str, Any] = do_normalize
snake_case : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
snake_case : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCAmelCase ( self , A , A , A = PIL.Image.BICUBIC , A = None , **A , ) -> np.ndarray:
snake_case : Dict = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return resize(
A , size=(size["""height"""], size["""width"""]) , resample=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A = None , **A , ) -> np.ndarray:
snake_case : Any = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(A , size=(size["""height"""], size["""width"""]) , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A = None , **A , ) -> Tuple:
return rescale(A , scale=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A , A = None , **A , ) -> np.ndarray:
return normalize(A , mean=A , std=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A = None , A = None , A=None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = ChannelDimension.FIRST , **A , ) -> PIL.Image.Image:
snake_case : str = do_resize if do_resize is not None else self.do_resize
snake_case : Dict = resample if resample is not None else self.resample
snake_case : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case : Tuple = do_rescale if do_rescale is not None else self.do_rescale
snake_case : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case : List[str] = do_normalize if do_normalize is not None else self.do_normalize
snake_case : int = image_mean if image_mean is not None else self.image_mean
snake_case : List[str] = image_std if image_std is not None else self.image_std
snake_case : Dict = size if size is not None else self.size
snake_case : Tuple = get_size_dict(A )
snake_case : Dict = crop_size if crop_size is not None else self.crop_size
snake_case : List[str] = get_size_dict(A , param_name="""crop_size""" )
snake_case : int = make_list_of_images(A )
if not valid_images(A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
snake_case : Optional[Any] = [to_numpy_array(A ) for image in images]
if do_resize:
snake_case : Dict = [self.resize(image=A , size=A , resample=A ) for image in images]
if do_center_crop:
snake_case : List[str] = [self.center_crop(image=A , size=A ) for image in images]
if do_rescale:
snake_case : List[str] = [self.rescale(image=A , scale=A ) for image in images]
if do_normalize:
snake_case : str = [self.normalize(image=A , mean=A , std=A ) for image in images]
snake_case : Union[str, Any] = [to_channel_dimension_format(A , A ) for image in images]
snake_case : List[Any] = {"""pixel_values""": images}
return BatchFeature(data=A , tensor_type=A )
| 684 | 1 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
lowerCamelCase : Union[str, Any] = re.compile(r'\b(a|an|the)\b', re.UNICODE)
lowerCamelCase : Tuple = None
def SCREAMING_SNAKE_CASE__ ( ) -> str:
snake_case : Union[str, Any] = argparse.ArgumentParser("""Official evaluation script for SQuAD version 2.0.""" )
parser.add_argument("""data_file""" ,metavar="""data.json""" ,help="""Input data JSON file.""" )
parser.add_argument("""pred_file""" ,metavar="""pred.json""" ,help="""Model predictions.""" )
parser.add_argument(
"""--out-file""" ,"""-o""" ,metavar="""eval.json""" ,help="""Write accuracy metrics to file (default is stdout).""" )
parser.add_argument(
"""--na-prob-file""" ,"""-n""" ,metavar="""na_prob.json""" ,help="""Model estimates of probability of no answer.""" )
parser.add_argument(
"""--na-prob-thresh""" ,"""-t""" ,type=lowercase ,default=1.0 ,help="""Predict \"\" if no-answer probability exceeds this (default = 1.0).""" ,)
parser.add_argument(
"""--out-image-dir""" ,"""-p""" ,metavar="""out_images""" ,default=lowercase ,help="""Save precision-recall curves to directory.""" )
parser.add_argument("""--verbose""" ,"""-v""" ,action="""store_true""" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
snake_case : Any = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
snake_case : int = bool(qa["""answers"""]["""text"""] )
return qid_to_has_ans
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Dict:
def remove_articles(lowercase ):
return ARTICLES_REGEX.sub(""" """ ,lowercase )
def white_space_fix(lowercase ):
return " ".join(text.split() )
def remove_punc(lowercase ):
snake_case : int = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(lowercase ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowercase ) ) ) )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Any:
if not s:
return []
return normalize_answer(lowercase ).split()
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> List[str]:
return int(normalize_answer(lowercase ) == normalize_answer(lowercase ) )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> Tuple:
snake_case : Union[str, Any] = get_tokens(lowercase )
snake_case : str = get_tokens(lowercase )
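# multiset intersection: counts the token-level overlap between the gold and predicted answers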
snake_case : Union[str, Any] = collections.Counter(lowercase ) & collections.Counter(lowercase )
snake_case : Union[str, Any] = sum(common.values() )
if len(lowercase ) == 0 or len(lowercase ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
snake_case : int = 1.0 * num_same / len(lowercase )
snake_case : List[str] = 1.0 * num_same / len(lowercase )
snake_case : Union[str, Any] = (2 * precision * recall) / (precision + recall)
return fa
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> Any:
snake_case : Tuple = {}
snake_case : List[str] = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
snake_case : Optional[Any] = qa["""id"""]
snake_case : Dict = [t for t in qa["""answers"""]["""text"""] if normalize_answer(lowercase )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
snake_case : Optional[Any] = [""""""]
if qid not in preds:
print(f"""Missing prediction for {qid}""" )
continue
snake_case : Dict = preds[qid]
# Take max over all gold answers
snake_case : Optional[Any] = max(compute_exact(lowercase ,lowercase ) for a in gold_answers )
snake_case : Union[str, Any] = max(compute_fa(lowercase ,lowercase ) for a in gold_answers )
return exact_scores, fa_scores
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase ) -> Union[str, Any]:
snake_case : str = {}
for qid, s in scores.items():
snake_case : Dict = na_probs[qid] > na_prob_thresh
if pred_na:
snake_case : Any = float(not qid_to_has_ans[qid] )
else:
snake_case : Tuple = s
return new_scores
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase=None ) -> str:
if not qid_list:
snake_case : Union[str, Any] = len(lowercase )
return collections.OrderedDict(
[
("""exact""", 100.0 * sum(exact_scores.values() ) / total),
("""f1""", 100.0 * sum(fa_scores.values() ) / total),
("""total""", total),
] )
else:
snake_case : List[Any] = len(lowercase )
return collections.OrderedDict(
[
("""exact""", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
("""f1""", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
("""total""", total),
] )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> int:
for k in new_eval:
snake_case : Any = new_eval[k]
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase ) -> Optional[Any]:
plt.step(lowercase ,lowercase ,color="""b""" ,alpha=0.2 ,where="""post""" )
plt.fill_between(lowercase ,lowercase ,step="""post""" ,alpha=0.2 ,color="""b""" )
plt.xlabel("""Recall""" )
plt.ylabel("""Precision""" )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(lowercase )
plt.savefig(lowercase )
plt.clf()
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase=None ,lowercase=None ) -> Optional[Any]:
snake_case : Optional[Any] = sorted(lowercase ,key=lambda lowercase : na_probs[lowercase] )
snake_case : Optional[Any] = 0.0
snake_case : int = 1.0
snake_case : int = 0.0
snake_case : List[str] = [1.0]
snake_case : Optional[Any] = [0.0]
snake_case : str = 0.0
for i, qid in enumerate(lowercase ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
snake_case : List[str] = true_pos / float(i + 1 )
snake_case : Tuple = true_pos / float(lowercase )
if i == len(lowercase ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(lowercase )
recalls.append(lowercase )
if out_image:
plot_pr_curve(lowercase ,lowercase ,lowercase ,lowercase )
return {"ap": 100.0 * avg_prec}
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ) -> Union[str, Any]:
if out_image_dir and not os.path.exists(lowercase ):
os.makedirs(lowercase )
snake_case : List[Any] = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
snake_case : str = make_precision_recall_eval(
lowercase ,lowercase ,lowercase ,lowercase ,out_image=os.path.join(lowercase ,"""pr_exact.png""" ) ,title="""Precision-Recall curve for Exact Match score""" ,)
snake_case : int = make_precision_recall_eval(
lowercase ,lowercase ,lowercase ,lowercase ,out_image=os.path.join(lowercase ,"""pr_f1.png""" ) ,title="""Precision-Recall curve for F1 score""" ,)
snake_case : List[str] = {k: float(lowercase ) for k, v in qid_to_has_ans.items()}
snake_case : Dict = make_precision_recall_eval(
lowercase ,lowercase ,lowercase ,lowercase ,out_image=os.path.join(lowercase ,"""pr_oracle.png""" ) ,title="""Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)""" ,)
merge_eval(lowercase ,lowercase ,"""pr_exact""" )
merge_eval(lowercase ,lowercase ,"""pr_f1""" )
merge_eval(lowercase ,lowercase ,"""pr_oracle""" )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase ) -> Any:
if not qid_list:
return
snake_case : List[str] = [na_probs[k] for k in qid_list]
snake_case : Tuple = np.ones_like(lowercase ) / float(len(lowercase ) )
plt.hist(lowercase ,weights=lowercase ,bins=20 ,range=(0.0, 1.0) )
plt.xlabel("""Model probability of no-answer""" )
plt.ylabel("""Proportion of dataset""" )
plt.title(f"""Histogram of no-answer probability: {name}""" )
plt.savefig(os.path.join(lowercase ,f"""na_prob_hist_{name}.png""" ) )
plt.clf()
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase ) -> Optional[int]:
snake_case : Any = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
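# start from the score of predicting no-answer for every question, then sweep the threshold over qids in order of increasing no-answer probability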
snake_case : Any = num_no_ans
snake_case : List[Any] = cur_score
snake_case : Tuple = 0.0
snake_case : Optional[int] = sorted(lowercase ,key=lambda lowercase : na_probs[lowercase] )
for i, qid in enumerate(lowercase ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
snake_case : List[str] = scores[qid]
else:
if preds[qid]:
snake_case : Tuple = -1
else:
snake_case : Optional[int] = 0
cur_score += diff
if cur_score > best_score:
snake_case : List[Any] = cur_score
snake_case : List[Any] = na_probs[qid]
return 100.0 * best_score / len(lowercase ), best_thresh
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ) -> Any:
snake_case , snake_case : List[str] = find_best_thresh(lowercase ,lowercase ,lowercase ,lowercase )
snake_case , snake_case : List[Any] = find_best_thresh(lowercase ,lowercase ,lowercase ,lowercase )
snake_case : str = best_exact
snake_case : Any = exact_thresh
snake_case : Optional[int] = best_fa
snake_case : List[Any] = fa_thresh
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
with open(OPTS.data_file ) as f:
snake_case : Any = json.load(lowercase )
snake_case : Dict = dataset_json["""data"""]
with open(OPTS.pred_file ) as f:
snake_case : Optional[int] = json.load(lowercase )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
snake_case : Optional[Any] = json.load(lowercase )
else:
snake_case : Union[str, Any] = {k: 0.0 for k in preds}
snake_case : List[Any] = make_qid_to_has_ans(lowercase ) # maps qid to True/False
snake_case : int = [k for k, v in qid_to_has_ans.items() if v]
snake_case : Dict = [k for k, v in qid_to_has_ans.items() if not v]
snake_case , snake_case : str = get_raw_scores(lowercase ,lowercase )
snake_case : str = apply_no_ans_threshold(lowercase ,lowercase ,lowercase ,OPTS.na_prob_thresh )
snake_case : List[str] = apply_no_ans_threshold(lowercase ,lowercase ,lowercase ,OPTS.na_prob_thresh )
snake_case : List[str] = make_eval_dict(lowercase ,lowercase )
if has_ans_qids:
snake_case : Optional[int] = make_eval_dict(lowercase ,lowercase ,qid_list=lowercase )
merge_eval(lowercase ,lowercase ,"""HasAns""" )
if no_ans_qids:
snake_case : List[Any] = make_eval_dict(lowercase ,lowercase ,qid_list=lowercase )
merge_eval(lowercase ,lowercase ,"""NoAns""" )
if OPTS.na_prob_file:
find_all_best_thresh(lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,OPTS.out_image_dir )
histogram_na_prob(lowercase ,lowercase ,OPTS.out_image_dir ,"""hasAns""" )
histogram_na_prob(lowercase ,lowercase ,OPTS.out_image_dir ,"""noAns""" )
if OPTS.out_file:
with open(OPTS.out_file ,"""w""" ) as f:
json.dump(lowercase ,lowercase )
else:
print(json.dumps(lowercase ,indent=2 ) )
if __name__ == "__main__":
lowerCamelCase : Tuple = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main()
| 684 |
import inspect
import unittest
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[Any]:
try:
import diffusers # noqa: F401
except ImportError:
assert False
def UpperCAmelCase ( self ) -> Tuple:
import diffusers
from diffusers.dependency_versions_table import deps
snake_case : List[str] = inspect.getmembers(A , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
snake_case : Tuple = """k-diffusion"""
elif backend == "invisible_watermark":
snake_case : Optional[int] = """invisible-watermark"""
assert backend in deps, f"""{backend} is not in the deps table!"""
| 684 | 1 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
'constant': get_constant_schedule,
'constant_w_warmup': get_constant_schedule_with_warmup,
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
def __init__( self , A=None , A=None , *A , **A ) -> Optional[int]:
super().__init__(*A , **A )
if config is None:
assert isinstance(self.model , A ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f""" {self.model.__class__}"""
)
snake_case : Tuple = self.model.config
else:
snake_case : Union[str, Any] = config
snake_case : Tuple = data_args
snake_case : Any = self.config.tgt_vocab_size if isinstance(self.config , A ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
""" padding..""" )
if self.args.label_smoothing == 0:
snake_case : List[str] = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
snake_case : Dict = label_smoothed_nll_loss
def UpperCAmelCase ( self , A ) -> List[str]:
if self.optimizer is None:
snake_case : Union[str, Any] = ["""bias""", """LayerNorm.weight"""]
snake_case : Dict = [
{
"""params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"""weight_decay""": self.args.weight_decay,
},
{
"""params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
snake_case : List[str] = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
snake_case : List[Any] = Adafactor
snake_case : Optional[int] = {"""scale_parameter""": False, """relative_step""": False}
else:
snake_case : List[str] = AdamW
snake_case : Any = {
"""betas""": (self.args.adam_betaa, self.args.adam_betaa),
"""eps""": self.args.adam_epsilon,
}
snake_case : str = self.args.learning_rate
if self.sharded_ddp:
snake_case : List[Any] = OSS(
params=A , optim=A , **A , )
else:
snake_case : List[Any] = optimizer_cls(A , **A )
if self.lr_scheduler is None:
snake_case : Any = self._get_lr_scheduler(A )
else: # ignoring --lr_scheduler
logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
def UpperCAmelCase ( self , A ) -> Any:
snake_case : str = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
snake_case : Optional[Any] = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
snake_case : Optional[int] = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
snake_case : int = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=A )
return scheduler
def UpperCAmelCase ( self ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def UpperCAmelCase ( self , A , A , A ) -> List[str]:
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
snake_case : Any = model(**A , use_cache=A )[0]
snake_case : Any = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
                # compute the usual loss via the model
snake_case , snake_case : int = model(**A , labels=A , use_cache=A )[:2]
else:
# compute label smoothed loss
snake_case : Any = model(**A , use_cache=A )[0]
snake_case : Optional[int] = torch.nn.functional.log_softmax(A , dim=-1 )
snake_case , snake_case : Union[str, Any] = self.loss_fn(A , A , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def UpperCAmelCase ( self , A , A ) -> int:
snake_case : int = inputs.pop("""labels""" )
snake_case , snake_case : Union[str, Any] = self._compute_loss(A , A , A )
return loss
def UpperCAmelCase ( self , A , A , A , A = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
snake_case : List[Any] = self._prepare_inputs(A )
snake_case : Tuple = {
"""max_length""": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"""num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
snake_case : List[Any] = self.model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **A , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
snake_case : Any = self._pad_tensors_to_max_len(A , gen_kwargs["""max_length"""] )
snake_case : Optional[Any] = inputs.pop("""labels""" )
with torch.no_grad():
# compute loss on predict data
snake_case , snake_case : List[Any] = self._compute_loss(A , A , A )
snake_case : int = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
snake_case : Dict = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
snake_case : Union[str, Any] = self._pad_tensors_to_max_len(A , gen_kwargs["""max_length"""] )
return (loss, logits, labels)
def UpperCAmelCase ( self , A , A ) -> Union[str, Any]:
# If PAD token is not defined at least EOS token has to be defined
snake_case : Tuple = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"""Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
f""" padded to `max_length`={max_length}""" )
snake_case : List[str] = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
snake_case : Tuple = tensor
return padded_tensor
| 684 |
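The trainer above imports `label_smoothed_nll_loss` from a local `utils` module at runtime. A minimal sketch of such a loss, in the spirit of the fairseq formulation, is given below; the signature mirrors the call site `self.loss_fn(lprobs, labels, epsilon, ignore_index=...)`, but the body is an assumption, not the exact helper the trainer imports.

import torch

def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=-100):
    # Fairseq-style label smoothing over log-probabilities (a sketch).
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    # clamp keeps gather valid when ignore_index is negative; those rows are masked out below
    nll_loss = -lprobs.gather(dim=-1, index=target.clamp(min=0))
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
    pad_mask = target.eq(ignore_index)
    nll_loss.masked_fill_(pad_mask, 0.0)
    smooth_loss.masked_fill_(pad_mask, 0.0)
    nll_loss, smooth_loss = nll_loss.sum(), smooth_loss.sum()
    eps_i = epsilon / lprobs.size(-1)
    return (1.0 - epsilon) * nll_loss + eps_i * smooth_loss, nll_loss

lprobs = torch.log_softmax(torch.randn(4, 10), dim=-1)
target = torch.tensor([1, 2, 3, -100])
loss, nll = label_smoothed_nll_loss(lprobs, target, epsilon=0.1)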
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
lowerCamelCase : Union[str, Any] = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
lowerCamelCase : List[Any] = 'main'
# Default branch name
lowerCamelCase : Tuple = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'
# One particular commit (not the top of `main`)
lowerCamelCase : List[Any] = 'aaaaaaa'
# This commit does not exist, so we should 404.
lowerCamelCase : List[Any] = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684'
# Sha-1 of config.json on the top of `main`, for checking purposes
lowerCamelCase : int = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
print("""Welcome!""" )
yield
print("""Bye!""" )
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
print("""Bonjour!""" )
yield
print("""Au revoir!""" )
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> int:
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec("""transformers""" ) is not None
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> Optional[Any]:
with ContextManagers([] ):
print("""Transformers are awesome!""" )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> int:
with ContextManagers([context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> int:
with ContextManagers([context_fr(), context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" )
@require_torch
def UpperCAmelCase ( self ) -> Optional[Any]:
self.assertEqual(find_labels(A ) , ["""labels"""] )
self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(A ) , ["""start_positions""", """end_positions"""] )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
pass
self.assertEqual(find_labels(A ) , ["""labels"""] )
@require_tf
def UpperCAmelCase ( self ) -> str:
self.assertEqual(find_labels(A ) , ["""labels"""] )
self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(A ) , ["""start_positions""", """end_positions"""] )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
pass
self.assertEqual(find_labels(A ) , ["""labels"""] )
@require_flax
def UpperCAmelCase ( self ) -> Any:
# Flax models don't have labels
self.assertEqual(find_labels(A ) , [] )
self.assertEqual(find_labels(A ) , [] )
self.assertEqual(find_labels(A ) , [] )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
pass
self.assertEqual(find_labels(A ) , [] )
| 684 | 1 |
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
lowerCamelCase : List[Any] = logging.get_logger(__name__)
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
def __init__( self , *A , **A ) -> None:
warnings.warn(
"""The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DeiTImageProcessor instead.""" , A , )
super().__init__(*A , **A )
| 684 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase : Dict = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """audio-spectrogram-transformer"""
def __init__( self , A=7_6_8 , A=1_2 , A=1_2 , A=3_0_7_2 , A="gelu" , A=0.0 , A=0.0 , A=0.02 , A=1e-1_2 , A=1_6 , A=True , A=1_0 , A=1_0 , A=1_0_2_4 , A=1_2_8 , **A , ) -> int:
super().__init__(**A )
snake_case : Any = hidden_size
snake_case : Tuple = num_hidden_layers
snake_case : Any = num_attention_heads
snake_case : Dict = intermediate_size
snake_case : int = hidden_act
snake_case : int = hidden_dropout_prob
snake_case : Tuple = attention_probs_dropout_prob
snake_case : int = initializer_range
snake_case : int = layer_norm_eps
snake_case : Any = patch_size
snake_case : List[Any] = qkv_bias
snake_case : int = frequency_stride
snake_case : Any = time_stride
snake_case : Union[str, Any] = max_length
snake_case : Any = num_mel_bins
| 684 | 1 |
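With the defaults above (16x16 patches, strides of 10 over a 128 x 1024 mel spectrogram), the model sees an overlapping grid of 12 x 101 = 1212 patches. A quick back-of-the-envelope check, assuming the usual strided-convolution arithmetic for the patch embedding (the helper name is illustrative):

def ast_num_patches(num_mel_bins=128, max_length=1024, patch_size=16,
                    frequency_stride=10, time_stride=10):
    # Same arithmetic as a conv with kernel `patch_size`, the given strides, no padding.
    frequency_out = (num_mel_bins - patch_size) // frequency_stride + 1
    time_out = (max_length - patch_size) // time_stride + 1
    return frequency_out * time_out

assert ast_num_patches() == 12 * 101 == 1212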
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
lowerCamelCase : List[str] = '.'
if __name__ == "__main__":
lowerCamelCase : List[Any] = os.path.join(REPO_PATH, 'utils/documentation_tests.txt')
lowerCamelCase : List[str] = []
lowerCamelCase : str = []
with open(doctest_file_path) as fp:
for line in fp:
lowerCamelCase : List[str] = line.strip()
lowerCamelCase : Dict = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
lowerCamelCase : List[str] = '\n'.join(non_existent_paths)
raise ValueError(f"""`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}""")
if all_paths != sorted(all_paths):
raise ValueError('Files in `utils/documentation_tests.txt` are not in alphabetical order.')
| 684 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCamelCase : Any = logging.get_logger(__name__)
class __lowercase (enum.Enum ):
"""simple docstring"""
_snake_case = 0
_snake_case = 1
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """generated"""
def __init__( self , *A , **A ) -> Optional[Any]:
super().__init__(*A , **A )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def UpperCAmelCase ( self , A=None , A=None , A=None , A=None , A=None , A=None , **A , ) -> Optional[int]:
snake_case : Tuple = {}
if truncation is not None:
snake_case : Union[str, Any] = truncation
snake_case : Dict = generate_kwargs
snake_case : int = {}
if return_tensors is not None and return_type is None:
snake_case : List[Any] = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
snake_case : List[str] = return_type
if clean_up_tokenization_spaces is not None:
snake_case : int = clean_up_tokenization_spaces
if stop_sequence is not None:
snake_case : Tuple = self.tokenizer.encode(A , add_special_tokens=A )
if len(A ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
snake_case : List[str] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCAmelCase ( self , A , A , A ) -> Union[str, Any]:
return True
def UpperCAmelCase ( self , *A , A ) -> Tuple:
snake_case : Union[str, Any] = self.model.config.prefix if self.model.config.prefix is not None else """"""
if isinstance(args[0] , A ):
if self.tokenizer.pad_token_id is None:
raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
snake_case : Union[str, Any] = ([prefix + arg for arg in args[0]],)
snake_case : List[Any] = True
elif isinstance(args[0] , A ):
snake_case : str = (prefix + args[0],)
snake_case : str = False
else:
raise ValueError(
f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
snake_case : Optional[Any] = self.tokenizer(*A , padding=A , truncation=A , return_tensors=self.framework )
        # This is produced by tokenizers but is an invalid kwarg for `generate`
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self , *A , **A ) -> Union[str, Any]:
snake_case : Tuple = super().__call__(*A , **A )
if (
isinstance(args[0] , A )
and all(isinstance(A , A ) for el in args[0] )
and all(len(A ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def UpperCAmelCase ( self , A , A=TruncationStrategy.DO_NOT_TRUNCATE , **A ) -> str:
snake_case : Optional[Any] = self._parse_and_tokenize(A , truncation=A , **A )
return inputs
def UpperCAmelCase ( self , A , **A ) -> Tuple:
if self.framework == "pt":
snake_case , snake_case : List[str] = model_inputs["""input_ids"""].shape
elif self.framework == "tf":
snake_case , snake_case : Optional[Any] = tf.shape(model_inputs["""input_ids"""] ).numpy()
snake_case : Dict = generate_kwargs.get("""min_length""" , self.model.config.min_length )
snake_case : str = generate_kwargs.get("""max_length""" , self.model.config.max_length )
self.check_inputs(A , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""] )
snake_case : List[str] = self.model.generate(**A , **A )
snake_case : Dict = output_ids.shape[0]
if self.framework == "pt":
snake_case : List[Any] = output_ids.reshape(A , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
snake_case : Any = tf.reshape(A , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def UpperCAmelCase ( self , A , A=ReturnType.TEXT , A=False ) -> Union[str, Any]:
snake_case : Tuple = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
snake_case : Dict = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
snake_case : int = {
f"""{self.return_name}_text""": self.tokenizer.decode(
A , skip_special_tokens=A , clean_up_tokenization_spaces=A , )
}
records.append(A )
return records
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """summary"""
def __call__( self , *A , **A ) -> str:
return super().__call__(*A , **A )
def UpperCAmelCase ( self , A , A , A ) -> bool:
if max_length < min_length:
logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
"""a summarization task, where outputs shorter than the input are typically wanted, you might """
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """translation"""
def UpperCAmelCase ( self , A , A , A ) -> Union[str, Any]:
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
"""increasing your max_length manually, e.g. translator('...', max_length=400)""" )
return True
def UpperCAmelCase ( self , *A , A=TruncationStrategy.DO_NOT_TRUNCATE , A=None , A=None ) -> Optional[int]:
if getattr(self.tokenizer , """_build_translation_inputs""" , A ):
return self.tokenizer._build_translation_inputs(
*A , return_tensors=self.framework , truncation=A , src_lang=A , tgt_lang=A )
else:
return super()._parse_and_tokenize(*A , truncation=A )
def UpperCAmelCase ( self , A=None , A=None , **A ) -> Union[str, Any]:
snake_case , snake_case , snake_case : str = super()._sanitize_parameters(**A )
if src_lang is not None:
snake_case : Tuple = src_lang
if tgt_lang is not None:
snake_case : str = tgt_lang
if src_lang is None and tgt_lang is None:
            # Backward compatibility; using the direct arguments is preferred.
snake_case : Union[str, Any] = kwargs.get("""task""" , self.task )
snake_case : Any = task.split("""_""" )
if task and len(A ) == 4:
# translation, XX, to YY
snake_case : Optional[Any] = items[1]
snake_case : Dict = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self , *A , **A ) -> str:
return super().__call__(*A , **A )
| 684 | 1 |
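The `TranslationPipeline` above derives `src_lang`/`tgt_lang` by splitting a task string such as `translation_en_to_fr` on `_` into four items. A minimal usage sketch (the checkpoint is just a small public example):

from transformers import pipeline

# "translation_en_to_fr" splits into ("translation", "en", "to", "fr"),
# which populates src_lang/tgt_lang exactly as in _sanitize_parameters above.
translator = pipeline("translation_en_to_fr", model="t5-small")
print(translator("Transformers are awesome!", max_length=40))
# -> [{'translation_text': ...}]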
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class __lowercase :
"""simple docstring"""
def __init__( self , A , A=1_3 , A=7 , A=True , A=True , A=False , A=True , A=9_9 , A=3_2 , A=5 , A=4 , A=3_7 , A="gelu" , A=0.1 , A=0.1 , A=5_1_2 , A=1_6 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> Tuple:
snake_case : Any = parent
snake_case : int = batch_size
snake_case : List[str] = seq_length
snake_case : str = is_training
snake_case : int = use_input_mask
snake_case : str = use_token_type_ids
snake_case : int = use_labels
snake_case : Optional[Any] = vocab_size
snake_case : Optional[int] = hidden_size
snake_case : List[str] = num_hidden_layers
snake_case : List[Any] = num_attention_heads
snake_case : Optional[int] = intermediate_size
snake_case : Dict = hidden_act
snake_case : Union[str, Any] = hidden_dropout_prob
snake_case : Tuple = attention_probs_dropout_prob
snake_case : Tuple = max_position_embeddings
snake_case : Optional[int] = type_vocab_size
snake_case : Any = type_sequence_label_size
snake_case : Tuple = initializer_range
snake_case : Tuple = num_labels
snake_case : Optional[int] = num_choices
snake_case : Dict = scope
def UpperCAmelCase ( self ) -> Tuple:
snake_case : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case : List[str] = None
if self.use_input_mask:
snake_case : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case : str = None
if self.use_token_type_ids:
snake_case : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case : Optional[Any] = None
snake_case : Optional[int] = None
snake_case : str = None
if self.use_labels:
snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
snake_case : Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self ) -> List[str]:
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , )
def UpperCAmelCase ( self , A , A , A , A , A , A , A ) -> Dict:
snake_case : List[Any] = LlamaModel(config=A )
model.to(A )
model.eval()
snake_case : Optional[int] = model(A , attention_mask=A )
snake_case : Dict = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , A , A , A , A , A , A , A , A , A , ) -> Optional[Any]:
snake_case : Union[str, Any] = True
snake_case : Dict = LlamaModel(A )
model.to(A )
model.eval()
snake_case : Optional[Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , )
snake_case : Optional[int] = model(
A , attention_mask=A , encoder_hidden_states=A , )
snake_case : Tuple = model(A , attention_mask=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , A , A , A , A , A , A , A , A , A , ) -> Tuple:
snake_case : int = LlamaForCausalLM(config=A )
model.to(A )
model.eval()
snake_case : Optional[Any] = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self , A , A , A , A , A , A , A , A , A , ) -> Optional[int]:
snake_case : str = True
snake_case : Optional[int] = True
snake_case : str = LlamaForCausalLM(config=A )
model.to(A )
model.eval()
# first forward pass
snake_case : Dict = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , use_cache=A , )
snake_case : Any = outputs.past_key_values
        # create hypothetical next tokens and extend next_input_ids with them
snake_case : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case : Union[str, Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append the new tokens to input_ids and the attention mask
snake_case : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case : List[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
snake_case : Optional[int] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , output_hidden_states=A , )["""hidden_states"""][0]
snake_case : str = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , past_key_values=A , output_hidden_states=A , )["""hidden_states"""][0]
# select random slice
snake_case : Dict = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case : str = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1e-3 ) )
def UpperCAmelCase ( self ) -> int:
snake_case : List[str] = self.prepare_config_and_inputs()
        snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case : Optional[Any] = config_and_inputs
snake_case : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __lowercase (UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
_snake_case = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
_snake_case = (LlamaForCausalLM,) if is_torch_available() else ()
_snake_case = (
{
"""feature-extraction""": LlamaModel,
"""text-classification""": LlamaForSequenceClassification,
"""text-generation""": LlamaForCausalLM,
"""zero-shot""": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_snake_case = False
_snake_case = False
def UpperCAmelCase ( self ) -> List[str]:
snake_case : Tuple = LlamaModelTester(self )
snake_case : List[Any] = ConfigTester(self , config_class=A , hidden_size=3_7 )
def UpperCAmelCase ( self ) -> str:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ) -> List[Any]:
snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCAmelCase ( self ) -> Union[str, Any]:
snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case : Tuple = type
self.model_tester.create_and_check_model(*A )
def UpperCAmelCase ( self ) -> Any:
snake_case , snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
snake_case : Tuple = 3
snake_case : Optional[int] = input_dict["""input_ids"""]
snake_case : List[str] = input_ids.ne(1 ).to(A )
snake_case : Optional[int] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case : str = LlamaForSequenceClassification(A )
model.to(A )
model.eval()
snake_case : str = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase ( self ) -> Dict:
snake_case , snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
snake_case : List[Any] = 3
snake_case : Optional[int] = """single_label_classification"""
snake_case : Optional[Any] = input_dict["""input_ids"""]
snake_case : Optional[Any] = input_ids.ne(1 ).to(A )
snake_case : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case : Optional[Any] = LlamaForSequenceClassification(A )
model.to(A )
model.eval()
snake_case : int = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase ( self ) -> List[str]:
snake_case , snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
snake_case : List[str] = 3
snake_case : List[Any] = """multi_label_classification"""
snake_case : Union[str, Any] = input_dict["""input_ids"""]
snake_case : Union[str, Any] = input_ids.ne(1 ).to(A )
snake_case : Optional[Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
snake_case : Union[str, Any] = LlamaForSequenceClassification(A )
model.to(A )
model.eval()
snake_case : List[str] = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" )
def UpperCAmelCase ( self ) -> Optional[int]:
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def UpperCAmelCase ( self , A ) -> List[str]:
snake_case , snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case : List[Any] = ids_tensor([1, 1_0] , config.vocab_size )
snake_case : List[str] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
snake_case : Dict = LlamaModel(A )
original_model.to(A )
original_model.eval()
snake_case : Optional[Any] = original_model(A ).last_hidden_state
snake_case : Optional[int] = original_model(A ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
snake_case : Optional[int] = {"""type""": scaling_type, """factor""": 10.0}
snake_case : Union[str, Any] = LlamaModel(A )
scaled_model.to(A )
scaled_model.eval()
snake_case : Optional[Any] = scaled_model(A ).last_hidden_state
snake_case : Optional[Any] = scaled_model(A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(A , A , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
@require_torch
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def UpperCAmelCase ( self ) -> Tuple:
snake_case : Tuple = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
snake_case : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" , device_map="""auto""" )
snake_case : str = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
snake_case : Optional[int] = torch.tensor([[-6.65_50, -4.12_27, -4.98_59, -3.24_06, 0.82_62, -3.00_33, 1.29_64, -3.36_99]] )
torch.testing.assert_close(out.mean(-1 ) , A , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
snake_case : Union[str, Any] = torch.tensor([-12.82_81, -7.44_53, -0.46_39, -8.06_25, -7.25_00, -8.00_00, -6.48_83, -7.76_95, -7.84_38, -7.03_12, -6.21_88, -7.13_28, -1.84_96, 1.99_61, -8.62_50, -6.72_27, -12.82_81, -6.94_92, -7.07_42, -7.78_52, -7.58_20, -7.90_62, -6.93_75, -7.98_05, -8.34_38, -8.15_62, -8.04_69, -7.62_50, -7.74_22, -7.33_98,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , A , atol=1e-5 , rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def UpperCAmelCase ( self ) -> List[str]:
snake_case : Optional[Any] = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
snake_case : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" , device_map="""auto""" )
snake_case : str = model(torch.tensor(A ) )
# Expected mean on dim = -1
snake_case : List[Any] = torch.tensor([[-2.06_22, -1.27_94, -1.16_38, -0.97_88, -1.46_03, -1.02_38, -1.78_93, -1.44_11]] )
torch.testing.assert_close(out.mean(-1 ) , A , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
snake_case : Union[str, Any] = torch.tensor([-8.14_06, -8.05_47, 2.74_61, -1.23_44, -0.14_48, -1.82_62, -1.00_20, -1.81_54, -1.68_95, -1.85_16, -2.35_74, -0.92_77, 3.75_98, 6.57_42, -1.29_98, -0.11_77, -8.14_06, -2.96_88, -2.91_99, -3.16_99, -3.52_54, -2.35_55, -2.79_88, -3.41_41, -2.82_62, -4.51_95, -3.33_79, -3.31_64, -2.78_32, -3.02_73] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , A , atol=1e-5 , rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def UpperCAmelCase ( self ) -> str:
snake_case : Optional[int] = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
snake_case : int = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" , device_map="""auto""" )
snake_case : Tuple = model(torch.tensor(A ) )
# Expected mean on dim = -1
snake_case : Optional[int] = torch.tensor([[-0.85_62, -1.85_20, -0.75_51, -0.41_62, -1.51_61, -1.20_38, -2.48_23, -2.32_54]] )
torch.testing.assert_close(out.mean(-1 ) , A , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
snake_case : Any = torch.tensor([-2.22_27, 4.88_28, 0.90_23, -0.45_78, -0.78_71, -0.10_33, -0.62_21, -0.57_86, -0.78_03, -1.06_74, -1.29_20, -0.15_70, 0.80_08, 2.07_23, -0.94_97, 0.27_71, -2.22_27, -0.76_12, -1.43_46, -1.20_61, -1.64_26, -0.30_00, -0.71_39, -1.19_34, -1.86_91, -1.69_73, -1.59_47, -1.27_05, -0.35_23, -0.55_13] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , A , atol=1e-2 , rtol=1e-2 )
@unittest.skip(
"""Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test""" )
@slow
def UpperCAmelCase ( self ) -> str:
snake_case : Union[str, Any] = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
snake_case : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" , device_map="""auto""" )
snake_case : Tuple = model(torch.tensor(A ) )
snake_case : int = torch.tensor(
[[-4.23_27, -3.33_60, -4.66_65, -4.76_31, -1.81_80, -3.41_70, -1.42_11, -3.18_10]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , A , atol=1e-2 , rtol=1e-2 )
# fmt: off
snake_case : Optional[Any] = torch.tensor([-9.49_22, -3.95_51, 1.79_98, -5.67_58, -5.10_55, -5.89_84, -4.83_20, -6.80_86, -6.53_91, -5.61_72, -5.58_20, -5.53_52, 1.78_81, 3.62_89, -6.51_17, -3.47_85, -9.50_00, -6.03_52, -6.81_25, -6.01_95, -6.68_36, -5.47_27, -6.28_12, -6.03_91, -7.33_98, -7.42_97, -7.48_44, -6.58_20, -5.87_89, -5.53_12] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , A , atol=1e-5 , rtol=1e-5 )
@unittest.skip("""Model is curently gated""" )
@slow
def UpperCAmelCase ( self ) -> Optional[Any]:
snake_case : List[str] = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"""
snake_case : Optional[Any] = """Simply put, the theory of relativity states that """
snake_case : str = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" )
snake_case : Optional[Any] = tokenizer.encode(A , return_tensors="""pt""" )
snake_case : Dict = LlamaForCausalLM.from_pretrained(
"""meta-llama/Llama-2-13b-chat-hf""" , device_map="""sequential""" , use_safetensors=A )
# greedy generation outputs
snake_case : Dict = model.generate(A , max_new_tokens=6_4 , top_p=A , temperature=1 , do_sample=A )
snake_case : List[str] = tokenizer.decode(generated_ids[0] , skip_special_tokens=A )
self.assertEqual(A , A )
| 684 |
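The `rope_scaling` test above exercises `{"type": "linear", "factor": 10.0}`. Below is a sketch of what linear scaling does to rotary embeddings (positions are divided by the factor before the frequencies are applied); this is an interpretation of the behaviour the test asserts, not the exact transformers implementation:

import torch

def rotary_cos_sin(seq_len, dim, base=10000.0, linear_factor=1.0):
    # Standard RoPE frequencies; "linear" scaling stretches positions by 1/factor.
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
    t = torch.arange(seq_len).float() / linear_factor
    freqs = torch.outer(t, inv_freq)
    emb = torch.cat((freqs, freqs), dim=-1)
    return emb.cos(), emb.sin()

cos_plain, _ = rotary_cos_sin(10, 64)
cos_scaled, _ = rotary_cos_sin(10, 64, linear_factor=10.0)
# Linear scaling changes the embeddings even for short inputs, which is why
# the test asserts the outputs differ for every scaling type except "dynamic".
assert not torch.allclose(cos_plain, cos_scaled)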
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> str:
snake_case : int = []
for line in lines:
snake_case : Dict = re.sub(R"""#.*""" ,"""""" ,lowercase ) # remove comments
if line:
filtered_lines.append(lowercase )
snake_case : Optional[int] = """\n""".join(lowercase )
# Make a hash from all this code
snake_case : List[str] = full_str.encode("""utf-8""" )
return shaaaa(lowercase ).hexdigest()
# get importable module names and hash for caching
lowerCamelCase : Any = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
lowerCamelCase : Optional[int] = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
lowerCamelCase : Tuple = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
lowerCamelCase : Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 684 | 1 |
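The hash above is deliberately insensitive to comments, so editing a comment in a packaged module does not invalidate the cache. A self-contained restatement (using the real `hashlib.sha256` name) demonstrating that property:

import re
from hashlib import sha256

def hash_python_lines(lines):
    # Same idea as the helper above: strip comments, drop empty lines, hash the rest.
    filtered = [re.sub(r"#.*", "", line) for line in lines]
    filtered = [line for line in filtered if line]
    return sha256("\n".join(filtered).encode("utf-8")).hexdigest()

a = hash_python_lines(["x = 1  # set x", "y = 2"])
b = hash_python_lines(["x = 1  # a different comment", "y = 2"])
c = hash_python_lines(["x = 2", "y = 2"])
assert a == b      # comment changes do not affect the hash
assert a != c      # code changes do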
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Dict:
snake_case : Dict = []
snake_case : Union[str, Any] = set({"""(""", """[""", """{"""} )
snake_case : Optional[Any] = set({""")""", """]""", """}"""} )
snake_case : List[Any] = {"""{""": """}""", """[""": """]""", """(""": """)"""}
    for i in range(len(lowercase ) ):
        # push every opener; on a closer, the most recent opener must match
        if s[i] in open_brackets:
            stack.append(s[i] )
        elif s[i] in closed_brackets and (
            len(lowercase ) == 0 or (len(lowercase ) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False
    # balanced only if every opener was consumed
    return len(lowercase ) == 0
def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
snake_case : Any = input("""Enter sequence of brackets: """ )
if is_balanced(lowercase ):
print(lowercase ,"""is balanced""" )
else:
print(lowercase ,"""is not balanced""" )
if __name__ == "__main__":
main()
| 684 |
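Since the snippet above reads its input interactively, here is a standalone restatement with a few sanity checks (hypothetical helper name, same stack-based logic):

def is_balanced(s):
    pairs = {")": "(", "]": "[", "}": "{"}
    stack = []
    for ch in s:
        if ch in "([{":
            stack.append(ch)                      # remember the opener
        elif ch in pairs:
            if not stack or stack.pop() != pairs[ch]:
                return False                      # closer with no matching opener
    return not stack                              # every opener must be consumed

assert is_balanced("([]{})")
assert not is_balanced("([)]")
assert not is_balanced("(((")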
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> Tuple:
# Initialise PyTorch model
snake_case : int = RemBertConfig.from_json_file(lowercase )
print("""Building PyTorch model from configuration: {}""".format(str(lowercase ) ) )
snake_case : Tuple = RemBertModel(lowercase )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(lowercase ,lowercase ,lowercase )
# Save pytorch-model
print("""Save PyTorch model to {}""".format(lowercase ) )
torch.save(model.state_dict() ,lowercase )
if __name__ == "__main__":
lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCamelCase : Dict = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 684 | 1 |
from itertools import product
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> list[int]:
snake_case : Tuple = sides_number
snake_case : List[str] = max_face_number * dice_number
snake_case : Any = [0] * (max_total + 1)
snake_case : int = 1
snake_case : List[str] = range(lowercase ,max_face_number + 1 )
for dice_numbers in product(lowercase ,repeat=lowercase ):
snake_case : Any = sum(lowercase )
totals_frequencies[total] += 1
return totals_frequencies
def SCREAMING_SNAKE_CASE__ ( ) -> float:
snake_case : List[str] = total_frequency_distribution(
sides_number=4 ,dice_number=9 )
snake_case : str = total_frequency_distribution(
sides_number=6 ,dice_number=6 )
snake_case : Optional[int] = 0
snake_case : List[str] = 9
snake_case : Union[str, Any] = 4 * 9
snake_case : Dict = 6
for peter_total in range(lowercase ,max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
snake_case : str = (4**9) * (6**6)
snake_case : int = peter_wins_count / total_games_number
snake_case : Optional[int] = round(lowercase ,ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(f"""{solution() = }""")
| 684 |
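The dice enumeration in the snippet above is easy to sanity-check on tiny dice: two 2-sided dice produce totals 2, 3, 4 with frequencies 1, 2, 1. A standalone restatement under descriptive names:

from itertools import product

def frequency_distribution(sides, dice):
    freqs = [0] * (sides * dice + 1)              # index = total, value = count
    for rolls in product(range(1, sides + 1), repeat=dice):
        freqs[sum(rolls)] += 1
    return freqs

assert frequency_distribution(sides=2, dice=2) == [0, 0, 1, 2, 1]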
from ..utils import DummyObject, requires_backends
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[str]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Any:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[int]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Union[str, Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Any:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Dict:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
| 684 | 1 |
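Each dummy class above funnels into `requires_backends`, which raises a helpful ImportError when the backend is absent. A minimal stand-in for that guard (the real transformers version also prints installation instructions):

import importlib.util

def requires_backends(obj, backends):
    name = obj if isinstance(obj, str) else getattr(obj, "__name__", type(obj).__name__)
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires backends that are not installed: {', '.join(missing)}")

try:
    requires_backends("FlaxDDIMPipeline", ["flax"])
except ImportError as err:
    print(err)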
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
lowerCamelCase : Union[str, Any] = 8
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase=BITS ) -> str:
    """Convert an image tensor in [0, 1] into a {-1, 1} bit representation."""
    snake_case : str = x.device
    # quantise to the uint8 range, then test each of the 8 bit positions
    snake_case : List[str] = (x * 255).int().clamp(0 ,255 )
    snake_case : List[str] = 2 ** torch.arange(bits - 1 ,-1 ,-1 ,device=lowercase )
    snake_case : int = rearrange(lowercase ,"""d -> d 1 1""" )
    snake_case : Optional[int] = rearrange(lowercase ,"""b c h w -> b c 1 h w""" )
    snake_case : Optional[Any] = ((x & mask) != 0).float()
    # fold the bit axis into the channel axis and rescale {0, 1} -> {-1, 1}
    snake_case : Dict = rearrange(lowercase ,"""b c d h w -> b (c d) h w""" )
    snake_case : Any = bits * 2 - 1
    return bits
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase=BITS ) -> List[str]:
    """Invert the bit encoding: {-1, 1} bits back to an image in [0, 1]."""
    snake_case : Union[str, Any] = x.device
    snake_case : Optional[int] = (x > 0).int()
    snake_case : Union[str, Any] = 2 ** torch.arange(bits - 1 ,-1 ,-1 ,device=lowercase ,dtype=torch.intaa )
    snake_case : List[Any] = rearrange(lowercase ,"""d -> d 1 1""" )
    # split the channel axis back into (channel, bit) pairs and sum the weighted bits
    snake_case : Union[str, Any] = rearrange(lowercase ,"""b (c d) h w -> b c d h w""" ,d=8 )
    snake_case : Any = reduce(x * mask ,"""b c d h w -> b c h w""" ,"""sum""" )
    return (dec / 255).clamp(0.0 ,1.0 )
def SCREAMING_SNAKE_CASE__ ( self ,lowercase ,lowercase ,lowercase ,lowercase = 0.0 ,lowercase = True ,lowercase=None ,lowercase = True ,) -> Union[DDIMSchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
"""Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>)
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
snake_case : Optional[int] = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
snake_case : List[Any] = self.alphas_cumprod[timestep]
snake_case : Union[str, Any] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
snake_case : List[str] = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
snake_case : Dict = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
snake_case : Dict = self.bit_scale
if self.config.clip_sample:
snake_case : Optional[Any] = torch.clamp(lowercase ,-scale ,lowercase )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
snake_case : Dict = self._get_variance(lowercase ,lowercase )
snake_case : Tuple = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
snake_case : str = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
snake_case : int = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
snake_case : Tuple = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
snake_case : Tuple = model_output.device if torch.is_tensor(lowercase ) else """cpu"""
snake_case : Optional[Any] = torch.randn(model_output.shape ,dtype=model_output.dtype ,generator=lowercase ).to(lowercase )
snake_case : Optional[Any] = self._get_variance(lowercase ,lowercase ) ** 0.5 * eta * noise
snake_case : Union[str, Any] = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=lowercase ,pred_original_sample=lowercase )
def SCREAMING_SNAKE_CASE__ ( self ,lowercase ,lowercase ,lowercase ,lowercase="epsilon" ,lowercase=None ,lowercase = True ,) -> Union[DDPMSchedulerOutput, Tuple]:
snake_case : Dict = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
snake_case , snake_case : str = torch.split(lowercase ,sample.shape[1] ,dim=1 )
else:
snake_case : Dict = None
# 1. compute alphas, betas
snake_case : int = self.alphas_cumprod[t]
snake_case : List[str] = self.alphas_cumprod[t - 1] if t > 0 else self.one
snake_case : Optional[int] = 1 - alpha_prod_t
snake_case : Any = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
snake_case : Optional[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
snake_case : List[Any] = model_output
else:
raise ValueError(f"""Unsupported prediction_type {prediction_type}.""" )
# 3. Clip "predicted x_0"
snake_case : Union[str, Any] = self.bit_scale
if self.config.clip_sample:
snake_case : Tuple = torch.clamp(lowercase ,-scale ,lowercase )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
snake_case : Optional[Any] = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
snake_case : List[str] = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
snake_case : Union[str, Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
snake_case : Optional[Any] = 0
if t > 0:
snake_case : Dict = torch.randn(
model_output.size() ,dtype=model_output.dtype ,layout=model_output.layout ,generator=lowercase ).to(model_output.device )
snake_case : Optional[Any] = (self._get_variance(lowercase ,predicted_variance=lowercase ) ** 0.5) * noise
snake_case : Dict = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=lowercase ,pred_original_sample=lowercase )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
def __init__( self , A , A , A = 1.0 , ) -> List[Any]:
super().__init__()
snake_case : List[Any] = bit_scale
snake_case : Any = (
ddim_bit_scheduler_step if isinstance(A , A ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=A , scheduler=A )
@torch.no_grad()
def __call__( self , A = 2_5_6 , A = 2_5_6 , A = 5_0 , A = None , A = 1 , A = "pil" , A = True , **A , ) -> Union[Tuple, ImagePipelineOutput]:
snake_case : Tuple = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) , generator=A , )
snake_case : Dict = decimal_to_bits(A ) * self.bit_scale
snake_case : Union[str, Any] = latents.to(self.device )
self.scheduler.set_timesteps(A )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
snake_case : List[str] = self.unet(A , A ).sample
# compute the previous noisy sample x_t -> x_t-1
snake_case : Union[str, Any] = self.scheduler.step(A , A , A ).prev_sample
snake_case : Optional[int] = bits_to_decimal(A )
if output_type == "pil":
snake_case : int = self.numpy_to_pil(A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A )
| 684 |
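The pipeline's bit codec quantises each channel to 8 bits and spreads them across the channel axis. A self-contained round-trip check under descriptive names (pure torch, no einops), confirming the reconstruction error is bounded by the 1/255 quantisation step:

import torch

def to_bits(x, bits=8):
    x = (x * 255).int().clamp(0, 255)                                 # quantise to uint8 range
    mask = 2 ** torch.arange(bits - 1, -1, -1)                        # bit weights 128..1
    b = ((x.unsqueeze(2) & mask.view(1, 1, -1, 1, 1)) != 0).float()   # (b, c, 8, h, w)
    return b.flatten(1, 2) * 2 - 1                                    # {-1, 1}, (b, c*8, h, w)

def from_bits(b, bits=8):
    b = (b > 0).int().unflatten(1, (-1, bits))                        # (b, c, 8, h, w)
    mask = 2 ** torch.arange(bits - 1, -1, -1)
    dec = (b * mask.view(1, 1, -1, 1, 1)).sum(2)
    return (dec / 255).clamp(0.0, 1.0)

x = torch.rand(2, 3, 8, 8)
recon = from_bits(to_bits(x))
assert (x - recon).abs().max() <= 1 / 255 + 1e-6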
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowerCamelCase : List[str] = 3
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
print("""Generating primitive root of p""" )
while True:
snake_case : Optional[int] = random.randrange(3 ,lowercase )
if pow(lowercase ,2 ,lowercase ) == 1:
continue
if pow(lowercase ,lowercase ,lowercase ) == 1:
continue
return g
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
print("""Generating prime p...""" )
snake_case : Optional[int] = rabin_miller.generate_large_prime(lowercase ) # select large prime number.
snake_case : Optional[int] = primitive_root(lowercase ) # one primitive root on modulo p.
snake_case : Optional[Any] = random.randrange(3 ,lowercase ) # private_key -> have to be greater than 2 for safety.
snake_case : Tuple = cryptomath.find_mod_inverse(pow(lowercase ,lowercase ,lowercase ) ,lowercase )
snake_case : str = (key_size, e_a, e_a, p)
snake_case : Optional[Any] = (key_size, d)
return public_key, private_key
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> None:
if os.path.exists(f"""{name}_pubkey.txt""" ) or os.path.exists(f"""{name}_privkey.txt""" ):
print("""\nWARNING:""" )
print(
f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
"""Use a different name or delete these files and re-run this program.""" )
sys.exit()
snake_case , snake_case : Optional[Any] = generate_key(lowercase )
print(f"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(f"""{name}_pubkey.txt""" ,"""w""" ) as fo:
fo.write(f"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" )
print(f"""Writing private key to file {name}_privkey.txt...""" )
with open(f"""{name}_privkey.txt""" ,"""w""" ) as fo:
fo.write(f"""{private_key[0]},{private_key[1]}""" )
def SCREAMING_SNAKE_CASE__ ( ) -> None:
print("""Making key files...""" )
make_key_files("""elgamal""" ,2048 )
print("""Key files generation successful""" )
if __name__ == "__main__":
main()
| 684 | 1 |
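For context, here is a toy encrypt/decrypt round trip under textbook ElGamal; the parameters are deliberately tiny and the key layout is generic, not the exact tuple format the script writes to disk:

import random

p, g = 467, 2                        # toy prime and generator, illustrative only
x = random.randrange(2, p - 1)       # private key
y = pow(g, x, p)                     # public key component

m = 123                              # message, encoded as an integer < p
k = random.randrange(2, p - 1)       # per-message ephemeral key
c1, c2 = pow(g, k, p), (m * pow(y, k, p)) % p

# Decrypt: m = c2 * (c1^x)^(-1) mod p  (modular inverse via 3-arg pow, Python >= 3.8)
recovered = (c2 * pow(pow(c1, x, p), -1, p)) % p
assert recovered == m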
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase : Any = {
'configuration_nllb_moe': [
'NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP',
'NllbMoeConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : str = [
'NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST',
'NllbMoeForConditionalGeneration',
'NllbMoeModel',
'NllbMoePreTrainedModel',
'NllbMoeTop2Router',
'NllbMoeSparseMLP',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,

)
else:
import sys
lowerCamelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 684 |
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> int:
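    # Exponentiation by squaring: an even exponent squares a half-size result,
    # an odd exponent peels off one factor of the base; O(log exponent) steps.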
if exponent == 1:
return base
if exponent % 2 == 0:
snake_case : Dict = _modexpt(lowercase ,exponent // 2 ,lowercase ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(lowercase ,exponent - 1 ,lowercase )) % modulo_value
def SCREAMING_SNAKE_CASE__ ( lowercase = 1777 ,lowercase = 1855 ,lowercase = 8 ) -> int:
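    # Last `digits` digits of the iterated power tower base^base^...^base:
    # repeat modular exponentiation, feeding the running result back in as the
    # next exponent.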
snake_case : int = base
for _ in range(1 ,lowercase ):
snake_case : List[str] = _modexpt(lowercase ,lowercase ,10**digits )
return result
if __name__ == "__main__":
print(f"""{solution() = }""")
| 684 | 1 |
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> list:
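    # Cocktail shaker sort: each iteration runs a backward then a forward
    # bubble pass; the swapped flag stops early once a sweep changes nothing.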
for i in range(len(lowercase ) - 1 ,0 ,-1 ):
snake_case : Any = False
for j in range(lowercase ,0 ,-1 ):
if unsorted[j] < unsorted[j - 1]:
snake_case , snake_case : Optional[Any] = unsorted[j - 1], unsorted[j]
snake_case : Dict = True
for j in range(lowercase ):
if unsorted[j] > unsorted[j + 1]:
snake_case , snake_case : Dict = unsorted[j + 1], unsorted[j]
snake_case : Tuple = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase : Any = input('Enter numbers separated by a comma:\n').strip()
lowerCamelCase : Optional[int] = [int(item) for item in user_input.split(',')]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 684 |
from itertools import product
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> list[int]:
snake_case : Tuple = sides_number
snake_case : List[str] = max_face_number * dice_number
snake_case : Any = [0] * (max_total + 1)
snake_case : int = 1
snake_case : List[str] = range(lowercase ,max_face_number + 1 )
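    # Enumerate every ordered roll of `dice_number` dice via the Cartesian
    # product of face values, tallying how often each total occurs.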
for dice_numbers in product(lowercase ,repeat=lowercase ):
snake_case : Any = sum(lowercase )
totals_frequencies[total] += 1
return totals_frequencies
def SCREAMING_SNAKE_CASE__ ( ) -> float:
snake_case : List[str] = total_frequency_distribution(
sides_number=4 ,dice_number=9 )
snake_case : str = total_frequency_distribution(
sides_number=6 ,dice_number=6 )
snake_case : Optional[int] = 0
snake_case : List[str] = 9
snake_case : Union[str, Any] = 4 * 9
snake_case : Dict = 6
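    # Peter wins when his total strictly exceeds Colin's, so each Peter total
    # contributes its frequency times the count of all smaller Colin totals.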
for peter_total in range(lowercase ,max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
snake_case : str = (4**9) * (6**6)
snake_case : int = peter_wins_count / total_games_number
snake_case : Optional[int] = round(lowercase ,ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(f"""{solution() = }""")
| 684 | 1 |
import inspect
import unittest
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[Any]:
try:
import diffusers # noqa: F401
except ImportError:
assert False
def UpperCAmelCase ( self ) -> Tuple:
import diffusers
from diffusers.dependency_versions_table import deps
        snake_case : List[str] = inspect.getmembers(diffusers , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
snake_case : Tuple = """k-diffusion"""
elif backend == "invisible_watermark":
snake_case : Optional[int] = """invisible-watermark"""
assert backend in deps, f"""{backend} is not in the deps table!"""
| 684 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 684 | 1 |
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase=False ) -> Any:
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
if not is_sharded:
snake_case : Any = os.path.abspath(lowercase )
logger.info(f"""Loading PyTorch weights from {pt_path}""" )
snake_case : Any = torch.load(lowercase ,map_location="""cpu""" )
logger.info(f"""PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.""" )
snake_case : Union[str, Any] = convert_pytorch_state_dict_to_flax(lowercase ,lowercase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
snake_case : List[Any] = convert_pytorch_sharded_state_dict_to_flax(lowercase ,lowercase )
return flax_state_dict
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase ,) -> (Tuple[str], np.ndarray):
def is_key_or_prefix_key_in_dict(lowercase ) -> bool:
return len(set(lowercase ) & {key, (model_prefix,) + key} ) > 0
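    # Translate a PyTorch parameter key/tensor into its Flax counterpart,
    # transposing kernels where the two frameworks disagree on weight layout.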
# layer norm
snake_case : List[Any] = pt_tuple_key[:-1] + ("""scale""",)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(lowercase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
snake_case : str = pt_tuple_key[:-1] + ("""mean""",)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(lowercase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
snake_case : Optional[int] = pt_tuple_key[:-1] + ("""var""",)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(lowercase ):
return renamed_pt_tuple_key, pt_tensor
# embedding
snake_case : Optional[Any] = pt_tuple_key[:-1] + ("""embedding""",)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(lowercase ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
snake_case : int = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(lowercase ):
snake_case : Optional[Any] = pt_tensor.transpose(2 ,3 ,1 ,0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
snake_case : Optional[int] = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(lowercase ):
snake_case : Tuple = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
snake_case : int = pt_tuple_key[:-1] + ("""weight""",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
snake_case : str = pt_tuple_key[:-1] + ("""bias""",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
snake_case : Dict = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
snake_case : Optional[Any] = pt_tuple_key[-2] + """_g"""
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
snake_case : List[str] = pt_tuple_key[-2] + """_v"""
if name is not None:
snake_case : Optional[Any] = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> Optional[Any]:
# convert pytorch tensor to numpy
snake_case : Tuple = {k: v.numpy() for k, v in pt_state_dict.items()}
snake_case : Dict = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
snake_case : List[str] = flax_model.params["""params"""]
else:
snake_case : List[str] = flax_model.params
snake_case : List[Any] = flatten_dict(lowercase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
snake_case : Optional[int] = flatten_dict(flax_model.params["""batch_stats"""] )
random_flax_state_dict.update(lowercase )
snake_case : Optional[Any] = {}
snake_case : Tuple = (model_prefix not in flax_model_params) and (
model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
snake_case : Any = (model_prefix in flax_model_params) and (
model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
snake_case : Union[str, Any] = tuple(pt_key.split(""".""" ) )
# remove base model prefix if necessary
snake_case : List[Any] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
snake_case : Tuple = pt_tuple_key[1:]
# Correctly rename weight parameters
snake_case , snake_case : str = rename_key_and_reshape_tensor(
lowercase ,lowercase ,lowercase ,lowercase )
# add model prefix if necessary
snake_case : Dict = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
snake_case : Tuple = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
snake_case : str = jnp.asarray(lowercase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(lowercase ,lowercase )
continue
# also add unexpected weight so that warning is thrown
snake_case : int = jnp.asarray(lowercase )
else:
# also add unexpected weight so that warning is thrown
snake_case : str = jnp.asarray(lowercase )
return unflatten_dict(lowercase )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> Dict:
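    # Same conversion as above, but one PyTorch shard is loaded and converted
    # at a time so the full checkpoint never has to sit in memory at once.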
import torch
# Load the index
snake_case : Any = {}
for shard_file in shard_filenames:
# load using msgpack utils
snake_case : Tuple = torch.load(lowercase )
snake_case : Optional[int] = {k: v.numpy() for k, v in pt_state_dict.items()}
snake_case : List[Any] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
snake_case : List[Any] = flax_model.params["""params"""]
snake_case : Optional[int] = flatten_dict(lowercase )
random_flax_state_dict.update(flatten_dict(flax_model.params["""batch_stats"""] ) )
else:
snake_case : Optional[int] = flax_model.params
snake_case : Optional[int] = flatten_dict(lowercase )
snake_case : List[str] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
snake_case : int = (model_prefix in flax_model_params) and (
model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
snake_case : int = tuple(pt_key.split(""".""" ) )
# remove base model prefix if necessary
snake_case : Dict = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
snake_case : Dict = pt_tuple_key[1:]
# Correctly rename weight parameters
snake_case , snake_case : Union[str, Any] = rename_key_and_reshape_tensor(
lowercase ,lowercase ,lowercase ,lowercase )
# add model prefix if necessary
snake_case : Optional[int] = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
snake_case : int = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
snake_case : Tuple = jnp.asarray(lowercase )
continue
if "var" in flax_key[-1]:
snake_case : List[Any] = jnp.asarray(lowercase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(lowercase ,lowercase )
continue
# also add unexpected weight so that warning is thrown
snake_case : List[str] = jnp.asarray(lowercase )
else:
# also add unexpected weight so that warning is thrown
snake_case : Optional[Any] = jnp.asarray(lowercase )
return unflatten_dict(lowercase )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> List[str]:
snake_case : str = os.path.abspath(lowercase )
logger.info(f"""Loading Flax weights from {flax_checkpoint_path}""" )
# import correct flax class
snake_case : str = getattr(lowercase ,"""Flax""" + model.__class__.__name__ )
# load flax weight dict
with open(lowercase ,"""rb""" ) as state_f:
try:
snake_case : str = from_bytes(lowercase ,state_f.read() )
except UnpicklingError:
raise EnvironmentError(f"""Unable to convert {flax_checkpoint_path} to Flax deserializable object. """ )
return load_flax_weights_in_pytorch_model(lowercase ,lowercase )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> Union[str, Any]:
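    # Reverse direction: walk the flattened Flax state, rename/transpose each
    # leaf into PyTorch layout, then report missing and unexpected keys.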
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
# check if we have bf16 weights
    snake_case : Optional[Any] = flatten_dict(jax.tree_util.tree_map(lambda lowercase : lowercase.dtype == jnp.bfloat16 ,lowercase ) ).values()
if any(lowercase ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"""Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
"""before loading those in PyTorch model.""" )
        snake_case : Dict = jax.tree_util.tree_map(
            lambda lowercase : lowercase.astype(np.float32 ) if lowercase.dtype == jnp.bfloat16 else lowercase ,lowercase )
snake_case : List[Any] = flatten_dict(lowercase )
snake_case : Optional[Any] = pt_model.state_dict()
snake_case : int = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
)
snake_case : Dict = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
snake_case : int = []
snake_case : Tuple = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
snake_case : List[str] = flax_key_tuple[0] == pt_model.base_model_prefix
snake_case : Union[str, Any] = """.""".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
snake_case : Optional[int] = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
snake_case : Optional[int] = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(lowercase ) not in pt_model_dict:
# conv layer
snake_case : Optional[Any] = flax_key_tuple[:-1] + ("""weight""",)
snake_case : Union[str, Any] = jnp.transpose(lowercase ,(3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(lowercase ) not in pt_model_dict:
# linear layer
snake_case : List[Any] = flax_key_tuple[:-1] + ("""weight""",)
snake_case : List[Any] = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
snake_case : List[Any] = flax_key_tuple[:-1] + ("""weight""",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
snake_case : Any = flax_key_tuple[:-1] + ("""running_mean""",)
elif "var" in flax_key_tuple[-1]:
snake_case : List[Any] = flax_key_tuple[:-1] + ("""running_var""",)
if "batch_stats" in flax_state:
snake_case : List[str] = """.""".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
snake_case : str = """.""".join(lowercase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
snake_case : Optional[Any] = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
snake_case : Tuple = key.split(""".""" )
snake_case : Union[str, Any] = None
if key_components[-3::2] == ["parametrizations", "original0"]:
snake_case : List[Any] = key_components[-2] + """_g"""
elif key_components[-3::2] == ["parametrizations", "original1"]:
snake_case : Tuple = key_components[-2] + """_v"""
if name is not None:
snake_case : List[str] = key_components[:-3] + [name]
snake_case : Optional[int] = """.""".join(lowercase )
snake_case : Optional[Any] = key
if flax_key in special_pt_names:
snake_case : Any = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
else:
# add weight to pytorch dict
snake_case : str = np.asarray(lowercase ) if not isinstance(lowercase ,np.ndarray ) else flax_tensor
snake_case : List[Any] = torch.from_numpy(lowercase )
# remove from missing keys
missing_keys.remove(lowercase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(lowercase )
pt_model.load_state_dict(lowercase )
# re-transform missing_keys to list
snake_case : Optional[int] = list(lowercase )
if len(lowercase ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
else:
logger.warning(f"""All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n""" )
if len(lowercase ) > 0:
logger.warning(
f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
""" use it for predictions and inference.""" )
else:
logger.warning(
f"""All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"""
"""If your task is similar to the task the model of the checkpoint was trained on, """
f"""you can already use {pt_model.__class__.__name__} for predictions without further training.""" )
return pt_model
| 684 |
import os
def SCREAMING_SNAKE_CASE__ ( ) -> Dict:
    with open(os.path.dirname(__file__ ) + """/grid.txt""" ) as f:
snake_case : Tuple = [] # noqa: E741
for _ in range(20 ):
l.append([int(lowercase ) for x in f.readline().split()] )
snake_case : Optional[Any] = 0
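    # Examine every run of four adjacent cells (horizontal, vertical, and both
    # diagonals), keeping the largest product found.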
# right
for i in range(20 ):
for j in range(17 ):
snake_case : List[Any] = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
snake_case : Tuple = temp
# down
for i in range(17 ):
for j in range(20 ):
snake_case : Any = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
snake_case : str = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
snake_case : int = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
snake_case : int = temp
# diagonal 2
for i in range(17 ):
for j in range(3 ,20 ):
snake_case : Any = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
snake_case : Any = temp
return maximum
if __name__ == "__main__":
print(solution())
| 684 | 1 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
lowerCamelCase : str = (3, 9, -1_1, 0, 7, 5, 1, -1)
lowerCamelCase : Dict = (4, 6, 2, 0, 8, 1_0, 3, -2)
@dataclass
class __lowercase :
"""simple docstring"""
_snake_case = 42
_snake_case = 42
class __lowercase :
"""simple docstring"""
def __init__( self , A ) -> None:
snake_case : Node | None = None
for i in sorted(A , reverse=A ):
snake_case : Any = Node(A , self.head )
def __iter__( self ) -> Iterator[int]:
snake_case : Optional[int] = self.head
while node:
yield node.data
snake_case : List[str] = node.next_node
def __len__( self ) -> int:
return sum(1 for _ in self )
def __str__( self ) -> str:
return " -> ".join([str(A ) for node in self] )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> SortedLinkedList:
return SortedLinkedList(list(lowercase ) + list(lowercase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase : Optional[Any] = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 684 |
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> list:
for i in range(len(lowercase ) - 1 ,0 ,-1 ):
snake_case : Any = False
for j in range(lowercase ,0 ,-1 ):
if unsorted[j] < unsorted[j - 1]:
snake_case , snake_case : Optional[Any] = unsorted[j - 1], unsorted[j]
snake_case : Dict = True
for j in range(lowercase ):
if unsorted[j] > unsorted[j + 1]:
snake_case , snake_case : Dict = unsorted[j + 1], unsorted[j]
snake_case : Tuple = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase : Any = input('Enter numbers separated by a comma:\n').strip()
lowerCamelCase : Optional[int] = [int(item) for item in user_input.split(',')]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 684 | 1 |
lowerCamelCase : Optional[Any] = 9.8_0665
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase = g ) -> float:
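    # Archimedes' principle: buoyant force = fluid density * gravity * displaced volume.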
if fluid_density <= 0:
raise ValueError("""Impossible fluid density""" )
if volume < 0:
raise ValueError("""Impossible Object volume""" )
if gravity <= 0:
raise ValueError("""Impossible Gravity""" )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 684 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : Any = {
'artists_file': 'artists.json',
'lyrics_file': 'lyrics.json',
'genres_file': 'genres.json',
}
lowerCamelCase : Any = {
'artists_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
},
'genres_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
},
'lyrics_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
},
}
lowerCamelCase : Optional[int] = {
'jukebox': 5_1_2,
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_LYRIC_TOKENS_SIZES
_snake_case = ["""input_ids""", """attention_mask"""]
def __init__( self , A , A , A , A=["v3", "v2", "v2"] , A=5_1_2 , A=5 , A="<|endoftext|>" , **A , ) -> Optional[Any]:
snake_case : Dict = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else unk_token
super().__init__(
unk_token=A , n_genres=A , version=A , max_n_lyric_tokens=A , **A , )
snake_case : Optional[Any] = version
snake_case : Optional[Any] = max_n_lyric_tokens
snake_case : Tuple = n_genres
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : Union[str, Any] = json.load(A )
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : str = json.load(A )
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : List[str] = json.load(A )
snake_case : Tuple = r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"""
        # v2 used a vocabulary of n_vocab=80 characters; v3 dropped '+', so n_vocab=79.
if len(self.lyrics_encoder ) == 7_9:
snake_case : Optional[Any] = oov.replace(r"""\-'""" , r"""\-+'""" )
snake_case : Optional[Any] = regex.compile(A )
snake_case : Optional[Any] = {v: k for k, v in self.artists_encoder.items()}
snake_case : int = {v: k for k, v in self.genres_encoder.items()}
snake_case : List[Any] = {v: k for k, v in self.lyrics_encoder.items()}
@property
def UpperCAmelCase ( self ) -> Optional[Any]:
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def UpperCAmelCase ( self ) -> str:
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
def UpperCAmelCase ( self , A , A , A ) -> Optional[Any]:
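        # Map artist and genre names to vocabulary ids (unknowns fall back to 0),
        # padding each genre list to n_genres with -1; lyrics map per character.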
snake_case : Optional[int] = [self.artists_encoder.get(A , 0 ) for artist in list_artists]
for genres in range(len(A ) ):
snake_case : Optional[int] = [self.genres_encoder.get(A , 0 ) for genre in list_genres[genres]]
snake_case : Union[str, Any] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
snake_case : Optional[Any] = [[self.lyrics_encoder.get(A , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def UpperCAmelCase ( self , A ) -> List[str]:
return list(A )
def UpperCAmelCase ( self , A , A , A , **A ) -> List[str]:
snake_case , snake_case , snake_case : Any = self.prepare_for_tokenization(A , A , A )
snake_case : Tuple = self._tokenize(A )
return artist, genre, lyrics
def UpperCAmelCase ( self , A , A , A , A = False ) -> Tuple[str, str, str, Dict[str, Any]]:
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
snake_case : Tuple = artists[idx].lower()
snake_case : List[Any] = [genres[idx].lower()]
else:
snake_case : Union[str, Any] = self._normalize(artists[idx] ) + """.v2"""
snake_case : Any = [
self._normalize(A ) + """.v2""" for genre in genres[idx].split("""_""" )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
snake_case : str = regex.compile(r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""" )
snake_case : Dict = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"""
snake_case : Union[str, Any] = {vocab[index]: index + 1 for index in range(len(A ) )}
snake_case : Optional[int] = 0
snake_case : Union[str, Any] = len(A ) + 1
snake_case : Optional[int] = self.vocab
snake_case : str = {v: k for k, v in self.vocab.items()}
snake_case : int = """"""
else:
snake_case : Optional[int] = regex.compile(r"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""" )
snake_case : int = self._run_strip_accents(A )
snake_case : Any = lyrics.replace("""\\""" , """\n""" )
snake_case : Tuple = self.out_of_vocab.sub("""""" , A ), [], []
return artists, genres, lyrics
def UpperCAmelCase ( self , A ) -> List[Any]:
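        # Strip accents: NFD-decompose the text, then drop combining marks
        # (Unicode category 'Mn').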
snake_case : int = unicodedata.normalize("""NFD""" , A )
snake_case : int = []
for char in text:
snake_case : Optional[Any] = unicodedata.category(A )
if cat == "Mn":
continue
output.append(A )
return "".join(A )
def UpperCAmelCase ( self , A ) -> str:
snake_case : Dict = (
[chr(A ) for i in range(ord("""a""" ) , ord("""z""" ) + 1 )]
+ [chr(A ) for i in range(ord("""A""" ) , ord("""Z""" ) + 1 )]
+ [chr(A ) for i in range(ord("""0""" ) , ord("""9""" ) + 1 )]
+ ["""."""]
)
snake_case : Dict = frozenset(A )
snake_case : Dict = re.compile(r"""_+""" )
snake_case : str = """""".join([c if c in accepted else """_""" for c in text.lower()] )
snake_case : List[Any] = pattern.sub("""_""" , A ).strip("""_""" )
return text
def UpperCAmelCase ( self , A ) -> str:
return " ".join(A )
def UpperCAmelCase ( self , A , A = None , A = False ) -> List[Any]:
# Convert to TensorType
if not isinstance(A , A ):
snake_case : Tuple = TensorType(A )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
"""Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""" )
import tensorflow as tf
snake_case : Union[str, Any] = tf.constant
snake_case : int = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""" )
import torch
snake_case : List[str] = torch.tensor
snake_case : Optional[Any] = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""" )
import jax.numpy as jnp # noqa: F811
snake_case : Optional[int] = jnp.array
snake_case : Dict = _is_jax
else:
snake_case : List[str] = np.asarray
snake_case : Tuple = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
snake_case : Any = [inputs]
if not is_tensor(A ):
snake_case : List[Any] = as_tensor(A )
except: # noqa E722
raise ValueError(
"""Unable to create tensor, you should probably activate truncation and/or padding """
"""with 'padding=True' 'truncation=True' to have batched tensors with the same length.""" )
return inputs
def __call__( self , A , A , A="" , A="pt" ) -> BatchEncoding:
snake_case : List[str] = [0, 0, 0]
snake_case : List[str] = [artist] * len(self.version )
snake_case : List[Any] = [genres] * len(self.version )
snake_case , snake_case , snake_case : Optional[int] = self.tokenize(A , A , A )
snake_case , snake_case , snake_case : int = self._convert_token_to_id(A , A , A )
snake_case : Any = [-INFINITY] * len(full_tokens[-1] )
snake_case : int = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=A )
for i in range(len(self.version ) )
]
return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks} )
def UpperCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Any = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=A ) )
snake_case : Any = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=A ) )
snake_case : Tuple = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=A ) )
return (artists_file, genres_file, lyrics_file)
def UpperCAmelCase ( self , A , A , A ) -> List[Any]:
snake_case : Optional[int] = self.artists_decoder.get(A )
snake_case : Optional[Any] = [self.genres_decoder.get(A ) for genre in genres_index]
snake_case : Optional[int] = [self.lyrics_decoder.get(A ) for character in lyric_index]
return artist, genres, lyrics
| 684 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase : List[str] = {'processing_layoutxlm': ['LayoutXLMProcessor']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Optional[Any] = ['LayoutXLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[str] = ['LayoutXLMTokenizerFast']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
lowerCamelCase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 684 |
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> list:
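    # Naive O(n*m) matching: try every alignment of the pattern against the
    # text and record each starting index where all characters agree.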
snake_case : str = len(lowercase )
snake_case : Tuple = []
for i in range(len(lowercase ) - pat_len + 1 ):
snake_case : str = True
for j in range(lowercase ):
if s[i + j] != pattern[j]:
snake_case : Dict = False
break
if match_found:
position.append(lowercase )
return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
| 684 | 1 |
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowerCamelCase : List[Any] = logging.get_logger(__name__)
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
def __init__( self , *A , **A ) -> None:
warnings.warn(
"""The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use LayoutLMv2ImageProcessor instead.""" , A , )
super().__init__(*A , **A )
| 684 |
import numpy as np
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> np.array:
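    # Element-wise tanh expressed as 2 / (1 + e^(-2x)) - 1.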
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 684 | 1 |
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> list:
snake_case : str = len(lowercase )
snake_case : Tuple = []
for i in range(len(lowercase ) - pat_len + 1 ):
snake_case : str = True
for j in range(lowercase ):
if s[i + j] != pattern[j]:
snake_case : Dict = False
break
if match_found:
position.append(lowercase )
return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
| 684 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase : Tuple = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : int = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Dict = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 684 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
lowerCamelCase : int = logging.get_logger(__name__)
lowerCamelCase : Any = {
'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """dpt"""
def __init__( self , A=7_6_8 , A=1_2 , A=1_2 , A=3_0_7_2 , A="gelu" , A=0.0 , A=0.0 , A=0.02 , A=1e-1_2 , A=3_8_4 , A=1_6 , A=3 , A=False , A=True , A=[2, 5, 8, 1_1] , A="project" , A=[4, 2, 1, 0.5] , A=[9_6, 1_9_2, 3_8_4, 7_6_8] , A=2_5_6 , A=-1 , A=False , A=True , A=0.4 , A=2_5_5 , A=0.1 , A=[1, 1_0_2_4, 2_4, 2_4] , A=[0, 1] , A=None , **A , ) -> Tuple:
super().__init__(**A )
snake_case : List[Any] = hidden_size
snake_case : List[Any] = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info("""Initializing the config with a `BiT` backbone.""" )
snake_case : Optional[Any] = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
}
snake_case : Optional[Any] = BitConfig(**A )
elif isinstance(A , A ):
logger.info("""Initializing the config with a `BiT` backbone.""" )
snake_case : Tuple = BitConfig(**A )
elif isinstance(A , A ):
snake_case : Dict = backbone_config
else:
raise ValueError(
f"""backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.""" )
snake_case : Union[str, Any] = backbone_featmap_shape
snake_case : str = neck_ignore_stages
if readout_type != "project":
raise ValueError("""Readout type must be 'project' when using `DPT-hybrid` mode.""" )
else:
snake_case : Union[str, Any] = None
snake_case : Tuple = None
snake_case : List[Any] = []
snake_case : Tuple = num_hidden_layers
snake_case : Dict = num_attention_heads
snake_case : Dict = intermediate_size
snake_case : Optional[Any] = hidden_act
snake_case : Optional[Any] = hidden_dropout_prob
snake_case : int = attention_probs_dropout_prob
snake_case : Tuple = initializer_range
snake_case : int = layer_norm_eps
snake_case : Tuple = image_size
snake_case : int = patch_size
snake_case : Union[str, Any] = num_channels
snake_case : Tuple = qkv_bias
snake_case : List[str] = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError("""Readout_type must be one of ['ignore', 'add', 'project']""" )
snake_case : Any = readout_type
snake_case : int = reassemble_factors
snake_case : Union[str, Any] = neck_hidden_sizes
snake_case : Optional[int] = fusion_hidden_size
snake_case : Optional[int] = head_in_index
snake_case : Tuple = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
snake_case : int = use_auxiliary_head
snake_case : Tuple = auxiliary_loss_weight
snake_case : str = semantic_loss_ignore_index
snake_case : Tuple = semantic_classifier_dropout
def UpperCAmelCase ( self ) -> Tuple:
snake_case : Any = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
snake_case : Optional[Any] = self.backbone_config.to_dict()
snake_case : Tuple = self.__class__.model_type
return output
| 684 |
lowerCamelCase : Union[str, Any] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
lowerCamelCase : Tuple = [{'type': 'code', 'content': INSTALL_CONTENT}]
lowerCamelCase : Union[str, Any] = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 684 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
lowerCamelCase : Optional[Any] = '\nHuman: <<task>>\n\nAssistant: '
lowerCamelCase : int = 'huggingface-tools/default-prompts'
lowerCamelCase : Any = {'chat': 'chat_prompt_template.txt', 'run': 'run_prompt_template.txt'}
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase="run" ) -> Union[str, Any]:
if prompt_or_repo_id is None:
snake_case : int = DEFAULT_PROMPTS_REPO
    # the argument is treated as a literal prompt (not a repo ID) when it contains any whitespace
if re.search("""\\s""" ,lowercase ) is not None:
return prompt_or_repo_id
snake_case : Optional[int] = cached_file(
lowercase ,PROMPT_FILES[mode] ,repo_type="""dataset""" ,user_agent={"""agent""": agent_name} )
with open(lowercase ,"""r""" ,encoding="""utf-8""" ) as f:
return f.read()
| 684 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {'vocab_file': 'spm_char.model'}
lowerCamelCase : List[str] = {
'vocab_file': {
'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model',
'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model',
'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model',
}
}
lowerCamelCase : List[Any] = {
'microsoft/speecht5_asr': 1_0_2_4,
'microsoft/speecht5_tts': 1_0_2_4,
'microsoft/speecht5_vc': 1_0_2_4,
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = ["""input_ids""", """attention_mask"""]
def __init__( self , A , A="<s>" , A="</s>" , A="<unk>" , A="<pad>" , A = None , **A , ) -> None:
snake_case : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A , eos_token=A , unk_token=A , pad_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
snake_case : Tuple = vocab_file
snake_case : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A )
@property
def UpperCAmelCase ( self ) -> List[Any]:
return self.sp_model.get_piece_size()
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Any = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> List[str]:
snake_case : Optional[Any] = self.__dict__.copy()
snake_case : Optional[Any] = None
return state
def __setstate__( self , A ) -> Tuple:
snake_case : Any = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
snake_case : List[Any] = {}
snake_case : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase ( self , A ) -> List[str]:
return self.sp_model.encode(A , out_type=A )
def UpperCAmelCase ( self , A ) -> Tuple:
return self.sp_model.piece_to_id(A )
def UpperCAmelCase ( self , A ) -> int:
snake_case : Union[str, Any] = self.sp_model.IdToPiece(A )
return token
def UpperCAmelCase ( self , A ) -> Tuple:
snake_case : Optional[int] = []
snake_case : str = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A ) + token
snake_case : Dict = []
else:
current_sub_tokens.append(A )
out_string += self.sp_model.decode(A )
return out_string.strip()
def UpperCAmelCase ( self , A , A=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCAmelCase ( self , A , A = None , A = False ) -> List[int]:
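        # 1 marks a special token; only the appended EOS is special here, so
        # the mask is all zeros followed by a single trailing one.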
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
snake_case : Any = [1]
if token_ids_a is None:
return ([0] * len(A )) + suffix_ones
return ([0] * len(A )) + ([0] * len(A )) + suffix_ones
def UpperCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Optional[Any] = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , """wb""" ) as fi:
snake_case : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
| 684 | 1 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowercase :
"""simple docstring"""
def __init__( self , A , A=1_3 , A=7 , A=True , A=True , A=True , A=True , A=True , A=False , A=False , A=False , A=2 , A=9_9 , A=0 , A=3_2 , A=5 , A=4 , A=0.1 , A=0.1 , A=5_1_2 , A=2 , A=0.02 , A=2 , A=4 , A="last" , A=True , A=None , A=0 , ) -> Union[str, Any]:
snake_case : Optional[Any] = parent
snake_case : Dict = batch_size
snake_case : List[str] = seq_length
snake_case : str = is_training
snake_case : str = use_input_lengths
snake_case : List[Any] = use_token_type_ids
snake_case : Optional[int] = use_labels
snake_case : Optional[int] = gelu_activation
snake_case : Dict = sinusoidal_embeddings
snake_case : Any = causal
snake_case : str = asm
snake_case : Dict = n_langs
snake_case : Optional[Any] = vocab_size
snake_case : Any = n_special
snake_case : List[str] = hidden_size
snake_case : List[Any] = num_hidden_layers
snake_case : Dict = num_attention_heads
snake_case : List[Any] = hidden_dropout_prob
snake_case : Dict = attention_probs_dropout_prob
snake_case : List[Any] = max_position_embeddings
snake_case : Optional[Any] = type_sequence_label_size
snake_case : Any = initializer_range
snake_case : List[str] = num_labels
snake_case : str = num_choices
snake_case : Union[str, Any] = summary_type
snake_case : Optional[int] = use_proj
snake_case : List[Any] = scope
snake_case : List[str] = bos_token_id
def UpperCAmelCase ( self ) -> Union[str, Any]:
snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case : int = random_attention_mask([self.batch_size, self.seq_length] )
snake_case : Any = None
if self.use_input_lengths:
snake_case : Tuple = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
snake_case : Any = None
if self.use_token_type_ids:
snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
snake_case : Optional[int] = None
snake_case : List[Any] = None
snake_case : Dict = None
if self.use_labels:
snake_case : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case : List[str] = ids_tensor([self.batch_size] , 2 ).float()
snake_case : Any = ids_tensor([self.batch_size] , self.num_choices )
snake_case : Tuple = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCAmelCase ( self ) -> int:
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def UpperCAmelCase ( self , A , A , A , A , A , A , A , A , A , ) -> List[str]:
snake_case : List[Any] = XLMModel(config=A )
model.to(A )
model.eval()
snake_case : str = model(A , lengths=A , langs=A )
snake_case : Optional[Any] = model(A , langs=A )
snake_case : str = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , A , A , A , A , A , A , A , A , A , ) -> Any:
snake_case : str = XLMWithLMHeadModel(A )
model.to(A )
model.eval()
snake_case : Union[str, Any] = model(A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self , A , A , A , A , A , A , A , A , A , ) -> Union[str, Any]:
snake_case : List[str] = XLMForQuestionAnsweringSimple(A )
model.to(A )
model.eval()
snake_case : Tuple = model(A )
snake_case : List[str] = model(A , start_positions=A , end_positions=A )
snake_case : Any = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self , A , A , A , A , A , A , A , A , A , ) -> Optional[Any]:
snake_case : Any = XLMForQuestionAnswering(A )
model.to(A )
model.eval()
snake_case : Union[str, Any] = model(A )
snake_case : Dict = model(
A , start_positions=A , end_positions=A , cls_index=A , is_impossible=A , p_mask=A , )
snake_case : List[Any] = model(
A , start_positions=A , end_positions=A , cls_index=A , is_impossible=A , )
((snake_case) , ) : List[str] = result_with_labels.to_tuple()
snake_case : int = model(A , start_positions=A , end_positions=A )
((snake_case) , ) : Optional[Any] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def UpperCAmelCase ( self , A , A , A , A , A , A , A , A , A , ) -> Optional[Any]:
snake_case : Any = XLMForSequenceClassification(A )
model.to(A )
model.eval()
snake_case : Optional[Any] = model(A )
snake_case : Optional[int] = model(A , labels=A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self , A , A , A , A , A , A , A , A , A , ) -> int:
snake_case : Tuple = self.num_labels
snake_case : Any = XLMForTokenClassification(A )
model.to(A )
model.eval()
snake_case : Tuple = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self , A , A , A , A , A , A , A , A , A , ) -> str:
snake_case : Optional[Any] = self.num_choices
snake_case : Optional[Any] = XLMForMultipleChoice(config=A )
model.to(A )
model.eval()
snake_case : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case : Dict = model(
A , attention_mask=A , token_type_ids=A , labels=A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase ( self ) -> List[Any]:
snake_case : Optional[int] = self.prepare_config_and_inputs()
        snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case : Tuple = config_and_inputs
snake_case : Dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """lengths""": input_lengths}
return config, inputs_dict
@require_torch
class __lowercase (UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
_snake_case = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
_snake_case = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_snake_case = (
{
"""feature-extraction""": XLMModel,
"""fill-mask""": XLMWithLMHeadModel,
"""question-answering""": XLMForQuestionAnsweringSimple,
"""text-classification""": XLMForSequenceClassification,
"""text-generation""": XLMWithLMHeadModel,
"""token-classification""": XLMForTokenClassification,
"""zero-shot""": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCAmelCase ( self , A , A , A , A , A ) -> Optional[int]:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCAmelCase ( self , A , A , A=False ) -> int:
snake_case : Any = super()._prepare_for_class(A , A , return_labels=A )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
snake_case : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A )
snake_case : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A )
return inputs_dict
def UpperCAmelCase ( self ) -> List[str]:
snake_case : int = XLMModelTester(self )
snake_case : int = ConfigTester(self , config_class=A , emb_dim=3_7 )
def UpperCAmelCase ( self ) -> int:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ) -> int:
snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*A )
def UpperCAmelCase ( self ) -> Union[str, Any]:
snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*A )
def UpperCAmelCase ( self ) -> Optional[Any]:
snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*A )
def UpperCAmelCase ( self ) -> List[Any]:
snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*A )
def UpperCAmelCase ( self ) -> int:
snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*A )
def UpperCAmelCase ( self ) -> List[str]:
snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*A )
def UpperCAmelCase ( self ) -> Tuple:
snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*A )
def UpperCAmelCase ( self , A , A , A , A , A , A=False , A=1 ) -> List[Any]:
self.assertIsInstance(A , A )
self.assertListEqual(
[isinstance(A , A ) for iter_attentions in attentions] , [True] * len(A ) )
self.assertEqual(len(A ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(A ):
# adds PAD dummy token
snake_case : Tuple = min_length + idx + 1
snake_case : Dict = min_length + idx + 1
snake_case : List[str] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(A ) )
def UpperCAmelCase ( self , A , A , A , A , A , A=False , A=1 ) -> str:
self.assertIsInstance(A , A )
self.assertListEqual(
[isinstance(A , A ) for iter_hidden_states in hidden_states] , [True] * len(A ) , )
self.assertEqual(len(A ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(A ):
# adds PAD dummy token
snake_case : Optional[Any] = min_length + idx + 1
snake_case : Tuple = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(A ) , )
pass
@slow
def UpperCAmelCase ( self ) -> str:
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case : int = XLMModel.from_pretrained(A )
self.assertIsNotNone(A )
@require_torch
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : List[Any] = XLMWithLMHeadModel.from_pretrained("""xlm-mlm-en-2048""" )
model.to(A )
snake_case : List[Any] = torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=A ) # the president
snake_case : int = [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
snake_case : List[Any] = model.generate(A , do_sample=A )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , A )
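# Hedged note (not part of the original file): the @slow tests above only run when the
# RUN_SLOW=1 environment variable is set, e.g. `RUN_SLOW=1 pytest tests/models/xlm/ -k lm_generate`
# (the test path is an assumption based on the usual transformers layout).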
| 684 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """gpt_neox_japanese"""
def __init__( self , A=3_2_0_0_0 , A=2_5_6_0 , A=3_2 , A=3_2 , A=4 , A="gelu" , A=1.00 , A=1_0_0_0_0 , A=2_0_4_8 , A=0.02 , A=1e-5 , A=True , A=3_1_9_9_6 , A=3_1_9_9_9 , A=0.1 , A=0.0 , **A , ) -> str:
super().__init__(bos_token_id=A , eos_token_id=A , **A )
snake_case : Optional[Any] = vocab_size
snake_case : Optional[Any] = max_position_embeddings
snake_case : Union[str, Any] = hidden_size
snake_case : Union[str, Any] = num_hidden_layers
snake_case : Optional[int] = num_attention_heads
snake_case : Optional[int] = intermediate_multiple_size
snake_case : int = hidden_act
snake_case : str = rotary_pct
snake_case : Optional[Any] = rotary_emb_base
snake_case : Any = initializer_range
snake_case : Any = layer_norm_eps
snake_case : Optional[Any] = use_cache
snake_case : Tuple = attention_dropout
snake_case : Tuple = hidden_dropout
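# Hedged usage sketch (not part of the original file; upstream this class is
# GPTNeoXJapaneseConfig, obfuscated here to `__lowercase`):
# config = __lowercase(vocab_size=3_2_0_0_0, hidden_size=2_5_6_0, num_hidden_layers=3_2)
# All keyword defaults mirror the __init__ signature above.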
| 684 | 1 |
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Optional[Any]:
snake_case : Optional[Any] = [
"""decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(lowercase ,lowercase )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Tuple:
snake_case , snake_case : Any = emb.weight.shape
snake_case : str = nn.Linear(lowercase ,lowercase ,bias=lowercase )
snake_case : Any = emb.weight.data
return lin_layer
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Any:
snake_case : List[Any] = torch.load(lowercase ,map_location="""cpu""" )
snake_case : Optional[Any] = Namespace(**checkpoint["""cfg"""]["""model"""] )
snake_case : Union[str, Any] = checkpoint["""model"""]
remove_ignore_keys_(lowercase )
snake_case : str = state_dict["""decoder.embed_tokens.weight"""].shape[0]
snake_case : Tuple = {key.replace("""decoder""" ,"""model""" ): val for key, val in state_dict.items()}
snake_case : Dict = XGLMConfig(
vocab_size=lowercase ,max_position_embeddings=args.max_target_positions ,num_layers=args.decoder_layers ,attention_heads=args.decoder_attention_heads ,ffn_dim=args.decoder_ffn_embed_dim ,d_model=args.decoder_embed_dim ,layerdrop=args.decoder_layerdrop ,dropout=args.dropout ,attention_dropout=args.attention_dropout ,activation_dropout=args.activation_dropout ,activation_function="""gelu""" ,scale_embedding=not args.no_scale_embedding ,tie_word_embeddings=args.share_decoder_input_output_embed ,)
snake_case : str = XGLMForCausalLM(lowercase )
snake_case : List[Any] = model.load_state_dict(lowercase ,strict=lowercase )
print(lowercase )
snake_case : Tuple = make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
lowerCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
lowerCamelCase : Optional[Any] = parser.parse_args()
lowerCamelCase : int = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
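# Example invocation (hypothetical paths; the script name follows the usual transformers layout):
# python convert_xglm_original_ckpt_to_trfms.py /path/to/fairseq/model.pt ./xglm-converted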
| 684 |
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
snake_case : Optional[Any] = hex_num.strip()
if not hex_num:
raise ValueError("""No value was passed to the function""" )
snake_case : Any = hex_num[0] == """-"""
if is_negative:
snake_case : int = hex_num[1:]
try:
snake_case : List[Any] = int(lowercase ,16 )
except ValueError:
raise ValueError("""Invalid value was passed to the function""" )
snake_case : Dict = """"""
while int_num > 0:
snake_case : Dict = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(("""-""" + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
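# Worked examples (hand-checked): hex "AC" is 172 decimal, i.e. 0b10101100, so the
# function returns the int 10101100; a signed input keeps its sign:
# SCREAMING_SNAKE_CASE__("AC")  -> 10101100
# SCREAMING_SNAKE_CASE__("-0f") -> -1111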
| 684 | 1 |
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
lowerCamelCase : int = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase=False ,) -> Optional[int]:
output_path.parent.mkdir(parents=lowercase ,exist_ok=lowercase )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
lowercase ,lowercase ,f=output_path.as_posix() ,input_names=lowercase ,output_names=lowercase ,dynamic_axes=lowercase ,do_constant_folding=lowercase ,use_external_data_format=lowercase ,enable_onnx_checker=lowercase ,opset_version=lowercase ,)
else:
export(
lowercase ,lowercase ,f=output_path.as_posix() ,input_names=lowercase ,output_names=lowercase ,dynamic_axes=lowercase ,do_constant_folding=lowercase ,opset_version=lowercase ,)
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase = False ) -> str:
snake_case : Optional[int] = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
snake_case : int = """cuda"""
elif fpaa and not torch.cuda.is_available():
raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" )
else:
snake_case : Dict = """cpu"""
snake_case : Union[str, Any] = StableDiffusionPipeline.from_pretrained(lowercase ,torch_dtype=lowercase ).to(lowercase )
snake_case : Optional[Any] = Path(lowercase )
# TEXT ENCODER
snake_case : List[Any] = pipeline.text_encoder.config.max_position_embeddings
snake_case : Any = pipeline.text_encoder.config.hidden_size
snake_case : Optional[Any] = pipeline.tokenizer(
"""A sample prompt""" ,padding="""max_length""" ,max_length=pipeline.tokenizer.model_max_length ,truncation=lowercase ,return_tensors="""pt""" ,)
onnx_export(
pipeline.text_encoder ,model_args=(text_input.input_ids.to(device=lowercase ,dtype=torch.intaa )) ,output_path=output_path / """text_encoder""" / """model.onnx""" ,ordered_input_names=["""input_ids"""] ,output_names=["""last_hidden_state""", """pooler_output"""] ,dynamic_axes={
"""input_ids""": {0: """batch""", 1: """sequence"""},
} ,opset=lowercase ,)
del pipeline.text_encoder
# UNET
snake_case : Optional[Any] = pipeline.unet.config.in_channels
snake_case : Dict = pipeline.unet.config.sample_size
snake_case : Optional[Any] = output_path / """unet""" / """model.onnx"""
onnx_export(
pipeline.unet ,model_args=(
torch.randn(2 ,lowercase ,lowercase ,lowercase ).to(device=lowercase ,dtype=lowercase ),
torch.randn(2 ).to(device=lowercase ,dtype=lowercase ),
torch.randn(2 ,lowercase ,lowercase ).to(device=lowercase ,dtype=lowercase ),
False,
) ,output_path=lowercase ,ordered_input_names=["""sample""", """timestep""", """encoder_hidden_states""", """return_dict"""] ,output_names=["""out_sample"""] ,dynamic_axes={
"""sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
"""timestep""": {0: """batch"""},
"""encoder_hidden_states""": {0: """batch""", 1: """sequence"""},
} ,opset=lowercase ,use_external_data_format=lowercase ,)
snake_case : Optional[Any] = str(unet_path.absolute().as_posix() )
snake_case : List[Any] = os.path.dirname(lowercase )
snake_case : Optional[Any] = onnx.load(lowercase )
# clean up existing tensor files
shutil.rmtree(lowercase )
os.mkdir(lowercase )
# collate external tensor files into one
onnx.save_model(
lowercase ,lowercase ,save_as_external_data=lowercase ,all_tensors_to_one_file=lowercase ,location="""weights.pb""" ,convert_attribute=lowercase ,)
del pipeline.unet
# VAE ENCODER
snake_case : Optional[int] = pipeline.vae
snake_case : List[Any] = vae_encoder.config.in_channels
snake_case : Optional[Any] = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
snake_case : Dict = lambda lowercase ,lowercase : vae_encoder.encode(lowercase ,lowercase )[0].sample()
onnx_export(
lowercase ,model_args=(
torch.randn(1 ,lowercase ,lowercase ,lowercase ).to(device=lowercase ,dtype=lowercase ),
False,
) ,output_path=output_path / """vae_encoder""" / """model.onnx""" ,ordered_input_names=["""sample""", """return_dict"""] ,output_names=["""latent_sample"""] ,dynamic_axes={
"""sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
} ,opset=lowercase ,)
# VAE DECODER
snake_case : Tuple = pipeline.vae
snake_case : int = vae_decoder.config.latent_channels
snake_case : Optional[int] = vae_decoder.config.out_channels
# forward only through the decoder part
snake_case : Optional[Any] = vae_encoder.decode
onnx_export(
lowercase ,model_args=(
torch.randn(1 ,lowercase ,lowercase ,lowercase ).to(device=lowercase ,dtype=lowercase ),
False,
) ,output_path=output_path / """vae_decoder""" / """model.onnx""" ,ordered_input_names=["""latent_sample""", """return_dict"""] ,output_names=["""sample"""] ,dynamic_axes={
"""latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
} ,opset=lowercase ,)
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
snake_case : Any = pipeline.safety_checker
snake_case : Optional[int] = safety_checker.config.vision_config.num_channels
snake_case : Dict = safety_checker.config.vision_config.image_size
snake_case : Optional[Any] = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker ,model_args=(
torch.randn(
1 ,lowercase ,lowercase ,lowercase ,).to(device=lowercase ,dtype=lowercase ),
torch.randn(1 ,lowercase ,lowercase ,lowercase ).to(device=lowercase ,dtype=lowercase ),
) ,output_path=output_path / """safety_checker""" / """model.onnx""" ,ordered_input_names=["""clip_input""", """images"""] ,output_names=["""out_images""", """has_nsfw_concepts"""] ,dynamic_axes={
"""clip_input""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
"""images""": {0: """batch""", 1: """height""", 2: """width""", 3: """channels"""},
} ,opset=lowercase ,)
del pipeline.safety_checker
snake_case : List[Any] = OnnxRuntimeModel.from_pretrained(output_path / """safety_checker""" )
snake_case : Optional[Any] = pipeline.feature_extractor
else:
snake_case : Optional[int] = None
snake_case : int = None
snake_case : List[str] = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / """vae_encoder""" ) ,vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / """vae_decoder""" ) ,text_encoder=OnnxRuntimeModel.from_pretrained(output_path / """text_encoder""" ) ,tokenizer=pipeline.tokenizer ,unet=OnnxRuntimeModel.from_pretrained(output_path / """unet""" ) ,scheduler=pipeline.scheduler ,safety_checker=lowercase ,feature_extractor=lowercase ,requires_safety_checker=safety_checker is not None ,)
onnx_pipeline.save_pretrained(lowercase )
print("""ONNX pipeline saved to""" ,lowercase )
del pipeline
del onnx_pipeline
snake_case : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(lowercase ,provider="""CPUExecutionProvider""" )
print("""ONNX pipeline is loadable""" )
if __name__ == "__main__":
lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=1_4,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
lowerCamelCase : List[Any] = parser.parse_args()
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
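# Example invocation (hypothetical; --fp16 requires a CUDA device per the check in convert_models):
# python convert_stable_diffusion_checkpoint_to_onnx.py \
#     --model_path runwayml/stable-diffusion-v1-5 --output_path ./sd_onnx --opset 14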
| 684 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""pixel_values"""]
def __init__( self , A = True , A = None , A = PIL.Image.BICUBIC , A = True , A = None , A = 1 / 2_5_5 , A = True , A = True , A = None , A = None , **A , ) -> None:
super().__init__(**A )
snake_case : int = size if size is not None else {"""height""": 2_5_6, """width""": 2_5_6}
snake_case : int = get_size_dict(A )
snake_case : Optional[Any] = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
snake_case : Dict = get_size_dict(A , param_name="""crop_size""" )
snake_case : int = do_resize
snake_case : str = size
snake_case : Tuple = resample
snake_case : Any = do_center_crop
snake_case : Tuple = crop_size
snake_case : int = do_rescale
snake_case : Dict = rescale_factor
snake_case : Union[str, Any] = do_normalize
snake_case : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
snake_case : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCAmelCase ( self , A , A , A = PIL.Image.BICUBIC , A = None , **A , ) -> np.ndarray:
snake_case : Dict = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return resize(
A , size=(size["""height"""], size["""width"""]) , resample=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A = None , **A , ) -> np.ndarray:
snake_case : Any = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(A , size=(size["""height"""], size["""width"""]) , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A = None , **A , ) -> Tuple:
return rescale(A , scale=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A , A = None , **A , ) -> np.ndarray:
return normalize(A , mean=A , std=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A = None , A = None , A=None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = ChannelDimension.FIRST , **A , ) -> PIL.Image.Image:
snake_case : str = do_resize if do_resize is not None else self.do_resize
snake_case : Dict = resample if resample is not None else self.resample
snake_case : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case : Tuple = do_rescale if do_rescale is not None else self.do_rescale
snake_case : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case : List[str] = do_normalize if do_normalize is not None else self.do_normalize
snake_case : int = image_mean if image_mean is not None else self.image_mean
snake_case : List[str] = image_std if image_std is not None else self.image_std
snake_case : Dict = size if size is not None else self.size
snake_case : Tuple = get_size_dict(A )
snake_case : Dict = crop_size if crop_size is not None else self.crop_size
snake_case : List[str] = get_size_dict(A , param_name="""crop_size""" )
snake_case : int = make_list_of_images(A )
if not valid_images(A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
snake_case : Optional[Any] = [to_numpy_array(A ) for image in images]
if do_resize:
snake_case : Dict = [self.resize(image=A , size=A , resample=A ) for image in images]
if do_center_crop:
snake_case : List[str] = [self.center_crop(image=A , size=A ) for image in images]
if do_rescale:
snake_case : List[str] = [self.rescale(image=A , scale=A ) for image in images]
if do_normalize:
snake_case : str = [self.normalize(image=A , mean=A , std=A ) for image in images]
snake_case : Union[str, Any] = [to_channel_dimension_format(A , A ) for image in images]
snake_case : List[Any] = {"""pixel_values""": images}
return BatchFeature(data=A , tensor_type=A )
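# Hedged usage sketch (not part of the original file; argument names follow the preprocess
# method above, the input image is a placeholder):
# processor = __lowercase()
# batch = processor(images=[pil_image], return_tensors="pt")
# batch["pixel_values"].shape  # (1, 3, crop_size["height"], crop_size["width"])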
| 684 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase : Dict = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """audio-spectrogram-transformer"""
def __init__( self , A=7_6_8 , A=1_2 , A=1_2 , A=3_0_7_2 , A="gelu" , A=0.0 , A=0.0 , A=0.02 , A=1e-1_2 , A=1_6 , A=True , A=1_0 , A=1_0 , A=1_0_2_4 , A=1_2_8 , **A , ) -> int:
super().__init__(**A )
snake_case : Any = hidden_size
snake_case : Tuple = num_hidden_layers
snake_case : Any = num_attention_heads
snake_case : Dict = intermediate_size
snake_case : int = hidden_act
snake_case : int = hidden_dropout_prob
snake_case : Tuple = attention_probs_dropout_prob
snake_case : int = initializer_range
snake_case : int = layer_norm_eps
snake_case : Any = patch_size
snake_case : List[Any] = qkv_bias
snake_case : int = frequency_stride
snake_case : Any = time_stride
snake_case : Union[str, Any] = max_length
snake_case : Any = num_mel_bins
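# Hedged usage sketch (upstream this class is ASTConfig; obfuscated here to `__lowercase`):
# config = __lowercase(num_mel_bins=1_2_8, max_length=1_0_2_4)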
| 684 |
import inspect
import unittest
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[Any]:
try:
import diffusers # noqa: F401
except ImportError:
assert False
def UpperCAmelCase ( self ) -> Tuple:
import diffusers
from diffusers.dependency_versions_table import deps
snake_case : List[str] = inspect.getmembers(A , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
snake_case : Tuple = """k-diffusion"""
elif backend == "invisible_watermark":
snake_case : Optional[int] = """invisible-watermark"""
assert backend in deps, f"""{backend} is not in the deps table!"""
| 684 | 1 |
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ) -> Optional[int]:
if index == r:
for j in range(lowercase ):
print(data[j] ,end=""" """ )
print(""" """ )
return
# When no more elements are there to put in data[]
if i >= n:
return
# current is included, put next at next location
snake_case : Optional[int] = arr[i]
combination_util(lowercase ,lowercase ,lowercase ,index + 1 ,lowercase ,i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> Optional[int]:
# A temporary array to store all combination one by one
snake_case : Optional[Any] = [0] * r
# Print all combination using temporary array 'data[]'
combination_util(lowercase ,lowercase ,lowercase ,0 ,lowercase ,0 )
if __name__ == "__main__":
# Driver code to check the function above
lowerCamelCase : Any = [1_0, 2_0, 3_0, 4_0, 5_0]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
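# Expected behavior: with arr = [10, 20, 30, 40, 50] and r = 3 the driver prints all
# C(5, 3) = 10 combinations, starting with "10 20 30" and ending with "30 40 50".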
| 684 |
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
lowerCamelCase : Union[str, Any] = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
lowerCamelCase : List[Any] = 'main'
# Default branch name
lowerCamelCase : Tuple = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'
# One particular commit (not the top of `main`)
lowerCamelCase : List[Any] = 'aaaaaaa'
# This commit does not exist, so we should 404.
lowerCamelCase : List[Any] = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684'
# Sha-1 of config.json on the top of `main`, for checking purposes
lowerCamelCase : int = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
print("""Welcome!""" )
yield
print("""Bye!""" )
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
print("""Bonjour!""" )
yield
print("""Au revoir!""" )
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> int:
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec("""transformers""" ) is not None
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> Optional[Any]:
with ContextManagers([] ):
print("""Transformers are awesome!""" )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> int:
with ContextManagers([context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> int:
with ContextManagers([context_fr(), context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" )
@require_torch
def UpperCAmelCase ( self ) -> Optional[Any]:
self.assertEqual(find_labels(A ) , ["""labels"""] )
self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(A ) , ["""start_positions""", """end_positions"""] )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
pass
self.assertEqual(find_labels(A ) , ["""labels"""] )
@require_tf
def UpperCAmelCase ( self ) -> str:
self.assertEqual(find_labels(A ) , ["""labels"""] )
self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(A ) , ["""start_positions""", """end_positions"""] )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
pass
self.assertEqual(find_labels(A ) , ["""labels"""] )
@require_flax
def UpperCAmelCase ( self ) -> Any:
# Flax models don't have labels
self.assertEqual(find_labels(A ) , [] )
self.assertEqual(find_labels(A ) , [] )
self.assertEqual(find_labels(A ) , [] )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
pass
self.assertEqual(find_labels(A ) , [] )
| 684 | 1 |
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
lowerCamelCase : List[str] = logging.get_logger(__name__)
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
def __init__( self , *A , **A ) -> List[Any]:
super().__init__(*A , **A )
requires_backends(self , """decord""" )
self.check_model_type(A )
def UpperCAmelCase ( self , A=None , A=None , A=None ) -> str:
snake_case : List[str] = {}
if frame_sampling_rate is not None:
snake_case : str = frame_sampling_rate
if num_frames is not None:
snake_case : str = num_frames
snake_case : Optional[Any] = {}
if top_k is not None:
snake_case : int = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , A , **A ) -> str:
return super().__call__(A , **A )
def UpperCAmelCase ( self , A , A=None , A=1 ) -> Tuple:
if num_frames is None:
snake_case : Optional[int] = self.model.config.num_frames
if video.startswith("""http://""" ) or video.startswith("""https://""" ):
snake_case : int = BytesIO(requests.get(A ).content )
snake_case : Optional[int] = VideoReader(A )
videoreader.seek(0 )
snake_case : Union[str, Any] = 0
snake_case : str = num_frames * frame_sampling_rate - 1
snake_case : Dict = np.linspace(A , A , num=A , dtype=np.intaa )
snake_case : Tuple = videoreader.get_batch(A ).asnumpy()
snake_case : Union[str, Any] = list(A )
snake_case : Tuple = self.image_processor(A , return_tensors=self.framework )
return model_inputs
def UpperCAmelCase ( self , A ) -> Union[str, Any]:
snake_case : int = self.model(**A )
return model_outputs
def UpperCAmelCase ( self , A , A=5 ) -> Optional[int]:
if top_k > self.model.config.num_labels:
snake_case : Any = self.model.config.num_labels
if self.framework == "pt":
snake_case : Tuple = model_outputs.logits.softmax(-1 )[0]
snake_case , snake_case : Optional[Any] = probs.topk(A )
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
snake_case : Any = scores.tolist()
snake_case : List[str] = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(A , A )]
| 684 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase : Dict = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """audio-spectrogram-transformer"""
def __init__( self , A=7_6_8 , A=1_2 , A=1_2 , A=3_0_7_2 , A="gelu" , A=0.0 , A=0.0 , A=0.02 , A=1e-1_2 , A=1_6 , A=True , A=1_0 , A=1_0 , A=1_0_2_4 , A=1_2_8 , **A , ) -> int:
super().__init__(**A )
snake_case : Any = hidden_size
snake_case : Tuple = num_hidden_layers
snake_case : Any = num_attention_heads
snake_case : Dict = intermediate_size
snake_case : int = hidden_act
snake_case : int = hidden_dropout_prob
snake_case : Tuple = attention_probs_dropout_prob
snake_case : int = initializer_range
snake_case : int = layer_norm_eps
snake_case : Any = patch_size
snake_case : List[Any] = qkv_bias
snake_case : int = frequency_stride
snake_case : Any = time_stride
snake_case : Union[str, Any] = max_length
snake_case : Any = num_mel_bins
| 684 | 1 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""image_processor""", """tokenizer"""]
_snake_case = """LayoutLMv2ImageProcessor"""
_snake_case = ("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""")
def __init__( self , A=None , A=None , **A ) -> Optional[Any]:
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , A , )
snake_case : Optional[Any] = kwargs.pop("""feature_extractor""" )
snake_case : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(A , A )
def __call__( self , A , A = None , A = None , A = None , A = None , A = True , A = False , A = None , A = None , A = 0 , A = None , A = None , A = None , A = False , A = False , A = False , A = False , A = True , A = None , **A , ) -> BatchEncoding:
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"""You cannot provide bounding boxes """
"""if you initialized the image processor with apply_ocr set to True.""" )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"""You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError("""You cannot return overflowing tokens without returning the offsets mapping.""" )
# first, apply the image processor
snake_case : Tuple = self.image_processor(images=A , return_tensors=A )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(A , A ):
snake_case : Optional[Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
snake_case : Optional[Any] = features["""words"""]
snake_case : List[Any] = self.tokenizer(
text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=A , add_special_tokens=A , padding=A , truncation=A , max_length=A , stride=A , pad_to_multiple_of=A , return_token_type_ids=A , return_attention_mask=A , return_overflowing_tokens=A , return_special_tokens_mask=A , return_offsets_mapping=A , return_length=A , verbose=A , return_tensors=A , **A , )
# add pixel values
snake_case : Dict = features.pop("""pixel_values""" )
if return_overflowing_tokens is True:
snake_case : Optional[int] = self.get_overflowing_images(A , encoded_inputs["""overflow_to_sample_mapping"""] )
snake_case : Optional[Any] = images
return encoded_inputs
def UpperCAmelCase ( self , A , A ) -> List[Any]:
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
snake_case : List[Any] = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(A ) != len(A ):
raise ValueError(
"""Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
f""" {len(A )} and {len(A )}""" )
return images_with_overflow
def UpperCAmelCase ( self , *A , **A ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*A , **A )
def UpperCAmelCase ( self , *A , **A ) -> Optional[Any]:
return self.tokenizer.decode(*A , **A )
@property
def UpperCAmelCase ( self ) -> Optional[int]:
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def UpperCAmelCase ( self ) -> Optional[Any]:
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , A , )
return self.image_processor_class
@property
def UpperCAmelCase ( self ) -> Optional[int]:
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , A , )
return self.image_processor
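# Hedged usage sketch (upstream this is LayoutXLMProcessor; the checkpoint name is an example):
# processor = __lowercase.from_pretrained("microsoft/layoutxlm-base")
# encoding = processor(image, return_tensors="pt")  # OCR runs inside the image processor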
| 684 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCamelCase : Any = logging.get_logger(__name__)
class __lowercase (enum.Enum ):
"""simple docstring"""
_snake_case = 0
_snake_case = 1
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """generated"""
def __init__( self , *A , **A ) -> Optional[Any]:
super().__init__(*A , **A )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def UpperCAmelCase ( self , A=None , A=None , A=None , A=None , A=None , A=None , **A , ) -> Optional[int]:
snake_case : Tuple = {}
if truncation is not None:
snake_case : Union[str, Any] = truncation
snake_case : Dict = generate_kwargs
snake_case : int = {}
if return_tensors is not None and return_type is None:
snake_case : List[Any] = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
snake_case : List[str] = return_type
if clean_up_tokenization_spaces is not None:
snake_case : int = clean_up_tokenization_spaces
if stop_sequence is not None:
snake_case : Tuple = self.tokenizer.encode(A , add_special_tokens=A )
if len(A ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
snake_case : List[str] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCAmelCase ( self , A , A , A ) -> Union[str, Any]:
return True
def UpperCAmelCase ( self , *A , A ) -> Tuple:
snake_case : Union[str, Any] = self.model.config.prefix if self.model.config.prefix is not None else """"""
if isinstance(args[0] , A ):
if self.tokenizer.pad_token_id is None:
raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
snake_case : Union[str, Any] = ([prefix + arg for arg in args[0]],)
snake_case : List[Any] = True
elif isinstance(args[0] , A ):
snake_case : str = (prefix + args[0],)
snake_case : str = False
else:
raise ValueError(
f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
snake_case : Optional[Any] = self.tokenizer(*A , padding=A , truncation=A , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self , *A , **A ) -> Union[str, Any]:
snake_case : Tuple = super().__call__(*A , **A )
if (
isinstance(args[0] , A )
and all(isinstance(A , A ) for el in args[0] )
and all(len(A ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def UpperCAmelCase ( self , A , A=TruncationStrategy.DO_NOT_TRUNCATE , **A ) -> str:
snake_case : Optional[Any] = self._parse_and_tokenize(A , truncation=A , **A )
return inputs
def UpperCAmelCase ( self , A , **A ) -> Tuple:
if self.framework == "pt":
snake_case , snake_case : List[str] = model_inputs["""input_ids"""].shape
elif self.framework == "tf":
snake_case , snake_case : Optional[Any] = tf.shape(model_inputs["""input_ids"""] ).numpy()
snake_case : Dict = generate_kwargs.get("""min_length""" , self.model.config.min_length )
snake_case : str = generate_kwargs.get("""max_length""" , self.model.config.max_length )
self.check_inputs(A , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""] )
snake_case : List[str] = self.model.generate(**A , **A )
snake_case : Dict = output_ids.shape[0]
if self.framework == "pt":
snake_case : List[Any] = output_ids.reshape(A , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
snake_case : Any = tf.reshape(A , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def UpperCAmelCase ( self , A , A=ReturnType.TEXT , A=False ) -> Union[str, Any]:
snake_case : Tuple = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
snake_case : Dict = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
snake_case : int = {
f"""{self.return_name}_text""": self.tokenizer.decode(
A , skip_special_tokens=A , clean_up_tokenization_spaces=A , )
}
records.append(A )
return records
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """summary"""
def __call__( self , *A , **A ) -> str:
return super().__call__(*A , **A )
def UpperCAmelCase ( self , A , A , A ) -> bool:
if max_length < min_length:
logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
"""a summarization task, where outputs shorter than the input are typically wanted, you might """
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """translation"""
def UpperCAmelCase ( self , A , A , A ) -> Union[str, Any]:
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
"""increasing your max_length manually, e.g. translator('...', max_length=400)""" )
return True
def UpperCAmelCase ( self , *A , A=TruncationStrategy.DO_NOT_TRUNCATE , A=None , A=None ) -> Optional[int]:
if getattr(self.tokenizer , """_build_translation_inputs""" , A ):
return self.tokenizer._build_translation_inputs(
*A , return_tensors=self.framework , truncation=A , src_lang=A , tgt_lang=A )
else:
return super()._parse_and_tokenize(*A , truncation=A )
def UpperCAmelCase ( self , A=None , A=None , **A ) -> Union[str, Any]:
snake_case , snake_case , snake_case : str = super()._sanitize_parameters(**A )
if src_lang is not None:
snake_case : Tuple = src_lang
if tgt_lang is not None:
snake_case : str = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
snake_case : Union[str, Any] = kwargs.get("""task""" , self.task )
snake_case : Any = task.split("""_""" )
if task and len(A ) == 4:
# translation, XX, to YY
snake_case : Optional[Any] = items[1]
snake_case : Dict = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self , *A , **A ) -> str:
return super().__call__(*A , **A )
| 684 | 1 |
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
lowerCamelCase : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
lowerCamelCase : list[int] = [ord(letter) for letter in string.ascii_lowercase]
lowerCamelCase : set[int] = {ord(char) for char in VALID_CHARS}
lowerCamelCase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> str | None:
snake_case : str = ""
snake_case : int
snake_case : int
snake_case : int
for keychar, cipherchar in zip(cycle(lowercase ) ,lowercase ):
snake_case : Union[str, Any] = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(lowercase )
return decoded
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> list[str]:
snake_case : list[str] = []
for key in product(lowercase ,repeat=3 ):
snake_case : Dict = try_key(lowercase ,lowercase )
if encoded is not None:
possibles.append(lowercase )
return possibles
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> list[str]:
return [possible for possible in possibles if common_word in possible.lower()]
def SCREAMING_SNAKE_CASE__ ( lowercase = "p059_cipher.txt" ) -> int:
snake_case : list[int]
snake_case : list[str]
snake_case : str
snake_case : str
snake_case : str = Path(lowercase ).parent.joinpath(lowercase ).read_text(encoding="""utf-8""" )
snake_case : List[Any] = [int(lowercase ) for number in data.strip().split(""",""" )]
snake_case : Optional[int] = filter_valid_chars(lowercase )
for common_word in COMMON_WORDS:
snake_case : str = filter_common_word(lowercase ,lowercase )
if len(lowercase ) == 1:
break
snake_case : str = possibles[0]
return sum(ord(lowercase ) for char in decoded_text )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 684 |
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> str:
snake_case : int = []
for line in lines:
snake_case : Dict = re.sub(R"""#.*""" ,"""""" ,lowercase ) # remove comments
if line:
filtered_lines.append(lowercase )
snake_case : Optional[int] = """\n""".join(lowercase )
# Make a hash from all this code
snake_case : List[str] = full_str.encode("""utf-8""" )
return shaaaa(lowercase ).hexdigest()
# get importable module names and hash for caching
lowerCamelCase : Any = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
lowerCamelCase : Optional[int] = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
lowerCamelCase : Tuple = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
lowerCamelCase : Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 684 | 1 |
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> str:
snake_case : Any = WavaVecaForSequenceClassification.from_pretrained(lowercase ,config=lowercase )
snake_case : Any = downstream_dict["""projector.weight"""]
snake_case : Optional[int] = downstream_dict["""projector.bias"""]
snake_case : Any = downstream_dict["""model.post_net.linear.weight"""]
snake_case : Tuple = downstream_dict["""model.post_net.linear.bias"""]
return model
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> int:
snake_case : Union[str, Any] = WavaVecaForAudioFrameClassification.from_pretrained(lowercase ,config=lowercase )
snake_case : List[str] = downstream_dict["""model.linear.weight"""]
snake_case : Any = downstream_dict["""model.linear.bias"""]
return model
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> Optional[Any]:
snake_case : Any = WavaVecaForXVector.from_pretrained(lowercase ,config=lowercase )
snake_case : Any = downstream_dict["""connector.weight"""]
snake_case : List[Any] = downstream_dict["""connector.bias"""]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
snake_case : Optional[Any] = downstream_dict[
f"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
snake_case : Optional[int] = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
snake_case : Optional[Any] = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""]
snake_case : Dict = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""]
snake_case : int = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""]
snake_case : str = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""]
snake_case : Optional[Any] = downstream_dict["""objective.W"""]
return model
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase ) -> int:
snake_case : List[str] = torch.load(lowercase ,map_location="""cpu""" )
snake_case : Union[str, Any] = checkpoint["""Downstream"""]
snake_case : List[str] = WavaVecaConfig.from_pretrained(lowercase )
snake_case : Tuple = WavaVecaFeatureExtractor.from_pretrained(
lowercase ,return_attention_mask=lowercase ,do_normalize=lowercase )
snake_case : Tuple = hf_config.architectures[0]
if arch.endswith("""ForSequenceClassification""" ):
snake_case : Optional[Any] = convert_classification(lowercase ,lowercase ,lowercase )
elif arch.endswith("""ForAudioFrameClassification""" ):
snake_case : List[str] = convert_diarization(lowercase ,lowercase ,lowercase )
elif arch.endswith("""ForXVector""" ):
snake_case : Optional[int] = convert_xvector(lowercase ,lowercase ,lowercase )
else:
raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
snake_case : str = checkpoint["""Featurizer"""]["""weights"""]
hf_feature_extractor.save_pretrained(lowercase )
hf_model.save_pretrained(lowercase )
if __name__ == "__main__":
lowerCamelCase : Any = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
lowerCamelCase : Union[str, Any] = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 684 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> Tuple:
# Initialise PyTorch model
snake_case : int = RemBertConfig.from_json_file(lowercase )
print("""Building PyTorch model from configuration: {}""".format(str(lowercase ) ) )
snake_case : Tuple = RemBertModel(lowercase )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(lowercase ,lowercase ,lowercase )
# Save pytorch-model
print("""Save PyTorch model to {}""".format(lowercase ) )
torch.save(model.state_dict() ,lowercase )
if __name__ == "__main__":
lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCamelCase : Dict = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
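# Example invocation (hypothetical paths):
# python convert_rembert_tf_checkpoint_to_pytorch.py --tf_checkpoint_path ./rembert.ckpt \
#     --rembert_config_file ./config.json --pytorch_dump_path ./pytorch_model.bin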
| 684 | 1 |
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
lowerCamelCase : List[str] = '%20'.join(argv[1:]) if len(argv) > 1 else quote(str(input('Search: ')))
print('Googling.....')
lowerCamelCase : str = f"""https://www.google.com/search?q={query}&num=100"""
lowerCamelCase : List[Any] = requests.get(
url,
headers={'User-Agent': str(UserAgent().random)},
)
try:
lowerCamelCase : List[str] = (
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'yuRUbf'})
.find('a')
.get('href')
)
except AttributeError:
lowerCamelCase : Optional[Any] = parse_qs(
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'kCrYT'})
.find('a')
.get('href')
)['url'][0]
webbrowser.open(link)
| 684 |
from ..utils import DummyObject, requires_backends
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[str]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Any:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[int]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Union[str, Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Any:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Dict:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
| 684 | 1 |
import argparse
import os
import re
lowerCamelCase : List[Any] = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
lowerCamelCase : List[str] = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
lowerCamelCase : List[Any] = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase = False ) -> str:
with open(lowercase ,"""r""" ,encoding="""utf-8""" ) as f:
snake_case : Dict = f.read()
snake_case : Dict = content.split("""\n""" )
snake_case : int = []
snake_case : Tuple = 0
while line_idx < len(lowercase ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
snake_case : Optional[Any] = len(re.search(R"""^(\s*)\S""" ,lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(""" """ * indent + """(""" ):
new_lines.append(lines[line_idx] )
line_idx += 1
snake_case : Optional[int] = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
snake_case : Optional[int] = line_idx
while not lines[line_idx].startswith(""" """ * indent + """)""" ):
line_idx += 1
blocks.append("""\n""".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
snake_case : Optional[int] = sorted(lowercase ,key=lambda lowercase : _re_identifier.search(lowercase ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(lowercase ,"""w""" ,encoding="""utf-8""" ) as f:
f.write("""\n""".join(lowercase ) )
elif "\n".join(lowercase ) != content:
return True
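# Illustrative effect on a mapping block (hypothetical entries): an input of
#     ("bert", "BertModel"),
#     ("albert", "AlbertModel"),
# is re-emitted with the tuples sorted by their first quoted identifier,
# i.e. "albert" before "bert", while everything outside the [...] body is
# left untouched.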
def SCREAMING_SNAKE_CASE__ ( lowercase = False ) -> str:
snake_case : Dict = [os.path.join(lowercase ,lowercase ) for f in os.listdir(lowercase ) if f.endswith(""".py""" )]
snake_case : List[str] = [sort_auto_mapping(lowercase ,overwrite=lowercase ) for fname in fnames]
if not overwrite and any(lowercase ):
snake_case : int = [f for f, d in zip(lowercase ,lowercase ) if d]
raise ValueError(
f"""The following files have auto mappings that need sorting: {", ".join(lowercase )}. Run `make style` to fix"""
""" this.""" )
if __name__ == "__main__":
lowerCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
lowerCamelCase : str = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 684 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowerCamelCase : List[str] = 3
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
print("""Generating primitive root of p""" )
while True:
snake_case : Optional[int] = random.randrange(3 ,lowercase )
if pow(lowercase ,2 ,lowercase ) == 1:
continue
if pow(lowercase ,lowercase ,lowercase ) == 1:
continue
return g
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
print("""Generating prime p...""" )
snake_case : Optional[int] = rabin_miller.generate_large_prime(lowercase ) # select large prime number.
snake_case : Optional[int] = primitive_root(lowercase ) # one primitive root on modulo p.
snake_case : Optional[Any] = random.randrange(3 ,lowercase ) # private key; must be greater than 2 for safety.
snake_case : Tuple = cryptomath.find_mod_inverse(pow(lowercase ,lowercase ,lowercase ) ,lowercase )
snake_case : str = (key_size, e_a, e_a, p)
snake_case : Optional[Any] = (key_size, d)
return public_key, private_key
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> None:
if os.path.exists(f"""{name}_pubkey.txt""" ) or os.path.exists(f"""{name}_privkey.txt""" ):
print("""\nWARNING:""" )
print(
f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
"""Use a different name or delete these files and re-run this program.""" )
sys.exit()
snake_case , snake_case : Optional[Any] = generate_key(lowercase )
print(f"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(f"""{name}_pubkey.txt""" ,"""w""" ) as fo:
fo.write(f"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" )
print(f"""Writing private key to file {name}_privkey.txt...""" )
with open(f"""{name}_privkey.txt""" ,"""w""" ) as fo:
fo.write(f"""{private_key[0]},{private_key[1]}""" )
def SCREAMING_SNAKE_CASE__ ( ) -> None:
print("""Making key files...""" )
make_key_files("""elgamal""" ,2048 )
print("""Key files generation successful""" )
if __name__ == "__main__":
main()
| 684 | 1 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> list:
if len(lowercase ) == 0:
return []
snake_case , snake_case : Optional[Any] = min(lowercase ), max(lowercase )
snake_case : List[Any] = int(max_value - min_value ) + 1
snake_case : list[list] = [[] for _ in range(lowercase )]
for i in my_list:
buckets[int(i - min_value )].append(lowercase )
return [v for bucket in buckets for v in sorted(lowercase )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
| 684 |
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> int:
if exponent == 1:
return base
if exponent % 2 == 0:
snake_case : Dict = _modexpt(lowercase ,exponent // 2 ,lowercase ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(lowercase ,exponent - 1 ,lowercase )) % modulo_value
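# Worked example of the recursion above: _modexpt(3, 5, 7) computes
# 3^5 = 3 * (3^2)^2 = 243 and reduces it modulo 7 to 5; because every
# intermediate product is reduced immediately, the hyperexponentiation loop
# below stays tractable even for an 8-digit modulus.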
def SCREAMING_SNAKE_CASE__ ( lowercase = 1777 ,lowercase = 1855 ,lowercase = 8 ) -> int:
snake_case : int = base
for _ in range(1 ,lowercase ):
snake_case : List[str] = _modexpt(lowercase ,lowercase ,10**digits )
return result
if __name__ == "__main__":
print(f"""{solution() = }""")
| 684 | 1 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
lowerCamelCase : Optional[Any] = get_tests_dir('fixtures')
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Optional[int]:
# A mock response for an HTTP head request to emulate server down
snake_case : Optional[int] = mock.Mock()
snake_case : List[str] = 5_0_0
snake_case : Optional[int] = {}
snake_case : Any = HTTPError
snake_case : int = {}
# Download this model to make sure it's in the cache.
snake_case : Any = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=A ) as mock_head:
snake_case : str = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# This checks that we did call the fake head request
mock_head.assert_called()
def UpperCAmelCase ( self ) -> Dict:
# This test is for deprecated behavior and can be removed in v5
snake_case : Optional[Any] = ViTImageProcessor.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" )
def UpperCAmelCase ( self ) -> Optional[Any]:
with self.assertRaises(A ):
# config is in subfolder, the following should not work without specifying the subfolder
snake_case : Optional[Any] = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" )
snake_case : Union[str, Any] = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/stable-diffusion-all-variants""" , subfolder="""feature_extractor""" )
self.assertIsNotNone(A )
@is_staging_test
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@classmethod
def UpperCAmelCase ( cls ) -> str:
snake_case : str = TOKEN
HfFolder.save_token(A )
@classmethod
def UpperCAmelCase ( cls ) -> Optional[int]:
try:
delete_repo(token=cls._token , repo_id="""test-image-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" )
except HTTPError:
pass
def UpperCAmelCase ( self ) -> Optional[Any]:
snake_case : Optional[int] = ViTImageProcessor.from_pretrained(A )
image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token )
snake_case : Optional[int] = ViTImageProcessor.from_pretrained(f"""{USER}/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A , repo_id="""test-image-processor""" , push_to_hub=A , use_auth_token=self._token )
snake_case : int = ViTImageProcessor.from_pretrained(f"""{USER}/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(A , getattr(A , A ) )
def UpperCAmelCase ( self ) -> str:
snake_case : Optional[Any] = ViTImageProcessor.from_pretrained(A )
image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token )
snake_case : Union[str, Any] = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=A , use_auth_token=self._token )
snake_case : List[str] = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(A , getattr(A , A ) )
def UpperCAmelCase ( self ) -> Optional[int]:
CustomImageProcessor.register_for_auto_class()
snake_case : Optional[int] = CustomImageProcessor.from_pretrained(A )
image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , )
snake_case : Optional[Any] = AutoImageProcessor.from_pretrained(
f"""{USER}/test-dynamic-image-processor""" , trust_remote_code=A )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
| 684 |
from itertools import product
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> list[int]:
snake_case : Tuple = sides_number
snake_case : List[str] = max_face_number * dice_number
snake_case : Any = [0] * (max_total + 1)
snake_case : int = 1
snake_case : List[str] = range(lowercase ,max_face_number + 1 )
for dice_numbers in product(lowercase ,repeat=lowercase ):
snake_case : Any = sum(lowercase )
totals_frequencies[total] += 1
return totals_frequencies
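# Project Euler 205: Peter rolls nine four-sided dice (totals 9..36) and
# Colin rolls six six-sided dice (totals 6..36); Peter wins when his total
# is strictly higher, so the loop below pairs each Peter total with the
# frequency of every strictly smaller Colin total.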
def SCREAMING_SNAKE_CASE__ ( ) -> float:
snake_case : List[str] = total_frequency_distribution(
sides_number=4 ,dice_number=9 )
snake_case : str = total_frequency_distribution(
sides_number=6 ,dice_number=6 )
snake_case : Optional[int] = 0
snake_case : List[str] = 9
snake_case : Union[str, Any] = 4 * 9
snake_case : Dict = 6
for peter_total in range(lowercase ,max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
snake_case : str = (4**9) * (6**6)
snake_case : int = peter_wins_count / total_games_number
snake_case : Optional[int] = round(lowercase ,ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(f"""{solution() = }""")
| 684 | 1 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
lowerCamelCase : Optional[int] = 'facebook/wmt19-en-de'
lowerCamelCase : List[Any] = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
lowerCamelCase : Any = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
lowerCamelCase : List[Any] = FSMTForConditionalGeneration(config)
print(f"""num of params {tiny_model.num_parameters()}""")
# Test
lowerCamelCase : List[Any] = tokenizer(['Making tiny model'], return_tensors='pt')
lowerCamelCase : int = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
lowerCamelCase : Union[str, Any] = 'tiny-wmt19-en-de'
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 684 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
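# If torch and a sufficiently recent transformers (>= 4.25.0) are available,
# expose the real Versatile Diffusion pipelines; otherwise import dummy
# placeholders that raise a helpful error only when actually used.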
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 684 | 1 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Any:
return getitem, k
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> int:
return setitem, k, v
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Optional[Any]:
return delitem, k
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,*lowercase ) -> Union[str, Any]:
try:
return fun(lowercase ,*lowercase ), None
except Exception as e:
return None, e
lowerCamelCase : Union[str, Any] = (
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
)
lowerCamelCase : List[str] = [
_set('key_a', 'val_a'),
_set('key_a', 'val_b'),
]
lowerCamelCase : Dict = [
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
_del('key_a'),
_del('key_b'),
_set('key_a', 'val_a'),
_del('key_a'),
]
lowerCamelCase : Any = [
_get('key_a'),
_del('key_a'),
_set('key_a', 'val_a'),
_del('key_a'),
_del('key_a'),
_get('key_a'),
]
lowerCamelCase : int = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
lowerCamelCase : str = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('key_a', 'val_b'),
]
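# Each scenario above is a list of (operation, *args) tuples; the
# parametrized test below replays the same sequence against both HashMap and
# a built-in dict, asserting that the results, string form, key set, length
# and item set all agree.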
@pytest.mark.parametrize(
"""operations""" ,(
pytest.param(_add_items ,id="""add items""" ),
pytest.param(_overwrite_items ,id="""overwrite items""" ),
pytest.param(_delete_items ,id="""delete items""" ),
pytest.param(_access_absent_items ,id="""access absent items""" ),
pytest.param(_add_with_resize_up ,id="""add with resize up""" ),
pytest.param(_add_with_resize_down ,id="""add with resize down""" ),
) ,)
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Union[str, Any]:
snake_case : Any = HashMap(initial_block_size=4 )
snake_case : Dict = {}
for _, (fun, *args) in enumerate(lowercase ):
snake_case , snake_case : Optional[int] = _run_operation(lowercase ,lowercase ,*lowercase )
snake_case , snake_case : Optional[int] = _run_operation(lowercase ,lowercase ,*lowercase )
assert my_res == py_res
assert str(lowercase ) == str(lowercase )
assert set(lowercase ) == set(lowercase )
assert len(lowercase ) == len(lowercase )
assert set(my.items() ) == set(py.items() )
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
def is_public(name ) -> bool:
return not name.startswith("""_""" )
snake_case : Any = {name for name in dir({} ) if is_public(lowercase )}
snake_case : Tuple = {name for name in dir(HashMap() ) if is_public(lowercase )}
assert dict_public_names > hash_public_names
| 684 |
import os
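# Project Euler 11: read the 20x20 grid from grid.txt and find the greatest
# product of four adjacent numbers along a row, a column or either diagonal.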
def SCREAMING_SNAKE_CASE__ ( ) -> Dict:
with open(os.path.dirname(lowercase ) + """/grid.txt""" ) as f:
snake_case : Tuple = [] # noqa: E741
for _ in range(20 ):
l.append([int(lowercase ) for x in f.readline().split()] )
snake_case : Optional[Any] = 0
# right
for i in range(20 ):
for j in range(17 ):
snake_case : List[Any] = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
snake_case : Tuple = temp
# down
for i in range(17 ):
for j in range(20 ):
snake_case : Any = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
snake_case : str = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
snake_case : int = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
snake_case : int = temp
# diagonal 2
for i in range(17 ):
for j in range(3 ,20 ):
snake_case : Any = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
snake_case : Any = temp
return maximum
if __name__ == "__main__":
print(solution())
| 684 | 1 |
from __future__ import annotations
import math
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> list:
if len(lowercase ) != 2 or len(a[0] ) != 2 or len(lowercase ) != 2 or len(b[0] ) != 2:
raise Exception("""Matrices are not 2x2""" )
snake_case : List[str] = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> List[str]:
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(lowercase ) )
]
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> Optional[int]:
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(lowercase ) )
]
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> tuple[list, list, list, list]:
if len(lowercase ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception("""Odd matrices are not supported!""" )
snake_case : Tuple = len(lowercase )
snake_case : Dict = matrix_length // 2
snake_case : List[Any] = [[a[i][j] for j in range(lowercase ,lowercase )] for i in range(lowercase )]
snake_case : Optional[int] = [
[a[i][j] for j in range(lowercase ,lowercase )] for i in range(lowercase ,lowercase )
]
snake_case : Optional[int] = [[a[i][j] for j in range(lowercase )] for i in range(lowercase )]
snake_case : Any = [[a[i][j] for j in range(lowercase )] for i in range(lowercase ,lowercase )]
return top_left, top_right, bot_left, bot_right
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> tuple[int, int]:
return len(lowercase ), len(matrix[0] )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> None:
print("""\n""".join(str(lowercase ) for line in matrix ) )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> list:
if matrix_dimensions(lowercase ) == (2, 2):
return default_matrix_multiplication(lowercase ,lowercase )
snake_case , snake_case , snake_case , snake_case : Any = split_matrix(lowercase )
snake_case , snake_case , snake_case , snake_case : Dict = split_matrix(lowercase )
snake_case : Optional[int] = actual_strassen(lowercase ,matrix_subtraction(lowercase ,lowercase ) )
snake_case : Any = actual_strassen(matrix_addition(lowercase ,lowercase ) ,lowercase )
snake_case : Union[str, Any] = actual_strassen(matrix_addition(lowercase ,lowercase ) ,lowercase )
snake_case : Any = actual_strassen(lowercase ,matrix_subtraction(lowercase ,lowercase ) )
snake_case : List[Any] = actual_strassen(matrix_addition(lowercase ,lowercase ) ,matrix_addition(lowercase ,lowercase ) )
snake_case : Union[str, Any] = actual_strassen(matrix_subtraction(lowercase ,lowercase ) ,matrix_addition(lowercase ,lowercase ) )
snake_case : Tuple = actual_strassen(matrix_subtraction(lowercase ,lowercase ) ,matrix_addition(lowercase ,lowercase ) )
snake_case : str = matrix_addition(matrix_subtraction(matrix_addition(lowercase ,lowercase ) ,lowercase ) ,lowercase )
snake_case : Union[str, Any] = matrix_addition(lowercase ,lowercase )
snake_case : Tuple = matrix_addition(lowercase ,lowercase )
snake_case : Tuple = matrix_subtraction(matrix_subtraction(matrix_addition(lowercase ,lowercase ) ,lowercase ) ,lowercase )
# construct the new matrix from our 4 quadrants
snake_case : int = []
for i in range(len(lowercase ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(lowercase ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
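# Reference identities (classic Strassen, 1969) that the seven recursive
# products above implement, with a and b split into quadrants ij:
#   m1 = a11 (b12 - b22)          m2 = (a11 + a12) b22
#   m3 = (a21 + a22) b11          m4 = a22 (b21 - b11)
#   m5 = (a11 + a22) (b11 + b22)  m6 = (a12 - a22) (b21 + b22)
#   m7 = (a11 - a21) (b11 + b12)
#   c11 = m5 + m4 - m2 + m6       c12 = m1 + m2
#   c21 = m3 + m4                 c22 = m5 + m1 - m3 - m7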
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> list:
if matrix_dimensions(lowercase )[1] != matrix_dimensions(lowercase )[0]:
snake_case : Any = (
"""Unable to multiply these matrices, please check the dimensions.\n"""
f"""Matrix A: {matrixa}\n"""
f"""Matrix B: {matrixa}"""
)
raise Exception(lowercase )
snake_case : Dict = matrix_dimensions(lowercase )
snake_case : Any = matrix_dimensions(lowercase )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
snake_case : Dict = max(*lowercase ,*lowercase )
snake_case : Union[str, Any] = int(math.pow(2 ,math.ceil(math.loga(lowercase ) ) ) )
snake_case : List[str] = matrixa
snake_case : List[Any] = matrixa
# Adding zeros to the matrices so that the arrays dimensions are the same and also
# power of 2
for i in range(0 ,lowercase ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] ,lowercase ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] ,lowercase ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
snake_case : List[str] = actual_strassen(lowercase ,lowercase )
# Removing the additional zeros
for i in range(0 ,lowercase ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] ,lowercase ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
lowerCamelCase : Optional[int] = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
lowerCamelCase : str = [[0, 2, 1, 1], [1_6, 2, 3, 3], [2, 2, 7, 7], [1_3, 1_1, 2_2, 4]]
print(strassen(matrixa, matrixa))
| 684 |
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> list:
for i in range(len(lowercase ) - 1 ,0 ,-1 ):
snake_case : Any = False
for j in range(lowercase ,0 ,-1 ):
if unsorted[j] < unsorted[j - 1]:
snake_case , snake_case : Optional[Any] = unsorted[j - 1], unsorted[j]
snake_case : Dict = True
for j in range(lowercase ):
if unsorted[j] > unsorted[j + 1]:
snake_case , snake_case : Dict = unsorted[j + 1], unsorted[j]
snake_case : Tuple = True
if not swapped:
break
return unsorted
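# Unlike plain bubble sort, each outer iteration makes a backward pass that
# sinks the smallest remaining value to the left, then a forward pass that
# floats the largest to the right; a no-swap flag exits early once a full
# round performs no exchange.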
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase : Any = input('Enter numbers separated by a comma:\n').strip()
lowerCamelCase : Optional[int] = [int(item) for item in user_input.split(',')]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 684 | 1 |